--- ./.hgtags Mon Dec 08 12:28:03 2014 -0800 +++ ./.hgtags Wed Feb 04 12:14:35 2015 -0800 @@ -349,3 +349,29 @@ 01a98532348477a84b6e3c322fdd12dfed28d96d jdk8u31-b10 ec85d5d0e3c05b0b6d61f4fc3f41313448ed9b05 jdk8u31-b11 ca98e3e9727ffdcde2c9980668d0c7f344261938 jdk8u31-b12 +fde671d8b2537b6be61f67e583164b5b8b33ac5b jdk8u31-b13 +0dccc4aca1859b1ff7dca9db214f7f38c4ddbbce jdk8u40-b00 +f8736a40a35df0c8055c8a94b96e5381b381ad33 jdk8u40-b01 +b6a148730f2b14193c308bc9c866c36ee6c08ed3 jdk8u40-b02 +a0224ac4135108efdbcf316c7c4b02f8a93c35fe jdk8u40-b03 +e9473185d77a9278e47fc916a255c4905aa74d98 jdk8u40-b04 +7e286a0c90fb26766b91e3a19467848f39d1f973 jdk8u40-b05 +8881a63f7f00a4a5611800db7715aecc8f6b3601 jdk8u40-b06 +cf9afcfcb7a41b892fc896c1dbf245d5dcc42395 jdk8u40-b07 +515a912fb5a9a61774fa2afa10f4472589be5602 jdk8u40-b08 +0958d0a9f44efcebe30b17b07240abaa42a2112b jdk8u40-b09 +1053aeab6b12d5cd4e063bf9ee37f20811450084 jdk8u40-b10 +2ffefbac794be27e0c60d7e5f2cb598f04ec2411 jdk8u40-b11 +3e6d3c8810ee495ba599443e29b0dc72c0dd53fd jdk8u40-b12 +43ccc2b9d5b7c06baca33162c6aff8900fa76b65 jdk8u40-b13 +df659b8b0bc82214358e2f0ef340612011d5ed3b jdk8u40-b14 +60d2bf063f7d8baa4e0954b6400aa6d30f9064f9 jdk8u40-b15 +64790e9792f88fadd9d25eaf4dcba7e6e96664de jdk8u40-b16 +e1c506c8e1db7356d120dd7d22b2c50276b6bcee jdk8u40-b17 +83d1d42c3df409c87ef7cb0126343a009857ca71 jdk8u40-b18 +ae4980d195b64eec58884b233d7efd312205bac8 jdk8u40-b19 +b31a07adaef50dacba20e376cff6f1096e745092 jdk8u40-b20 +765a17e75fd622f7b892381e23c9b2c531d416f0 jdk8u40-b21 +b6d03a810a61116268fea08517a9632bd66a7363 jdk8u40-b22 +5dd2ad6c7911a1e21f15a28f13ffad662378a3be jdk8u40-b23 +5b37e6757d7c95c9c58b07fb3c9eba234567385a jdk8u40-b24 --- ./Makefile Mon Dec 08 12:28:03 2014 -0800 +++ ./Makefile Wed Feb 04 12:14:35 2015 -0800 @@ -66,7 +66,7 @@ # First, find out the valid targets # Run the makefile with an arbitrary SPEC using -p -q (quiet dry-run and dump rules) to find # available PHONY targets. Use this list as valid targets to pass on to the repeated calls. - all_phony_targets=$(filter-out $(global_targets) bundles bundles-only final-images-only, $(strip $(shell \ + all_phony_targets=$(filter-out $(global_targets), $(strip $(shell \ cd $(root_dir) && $(MAKE) -p -q FRC SPEC=$(firstword $(SPEC)) | \ grep ^.PHONY: | head -n 1 | cut -d " " -f 2-))) @@ -79,10 +79,6 @@ endif endif -# Include this after a potential spec file has been included so that the bundles target -# has access to the spec variables. -include $(root_dir)/make/Jprt.gmk - # Here are "global" targets, i.e. targets that can be executed without specifying a single configuration. # If you addd more global targets, please update the variable global_targets in MakeHelpers. --- ./THIRD_PARTY_README Mon Dec 08 12:28:03 2014 -0800 +++ ./THIRD_PARTY_README Wed Feb 04 12:14:35 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8. Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- ./common/autoconf/boot-jdk.m4 Mon Dec 08 12:28:03 2014 -0800 +++ ./common/autoconf/boot-jdk.m4 Wed Feb 04 12:14:35 2015 -0800 @@ -301,35 +301,59 @@ [specify JVM arguments to be passed to all invocations of the Boot JDK, overriding the default values, e.g --with-boot-jdk-jvmargs="-Xmx8G -enableassertions"])]) - if test "x$with_boot_jdk_jvmargs" = x; then - # Not all JVM:s accept the same arguments on the command line. 
- # OpenJDK specific increase in thread stack for JDK build, - # well more specifically, when running javac. - if test "x$BUILD_NUM_BITS" = x32; then - STACK_SIZE=768 - else - # Running Javac on a JVM on a 64-bit machine, the stack takes more space - # since 64-bit pointers are pushed on the stach. Apparently, we need - # to increase the stack space when javacing the JDK.... - STACK_SIZE=1536 - fi + AC_MSG_CHECKING([flags for boot jdk java command] ) - # Minimum amount of heap memory. - ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA]) - if test "x$OPENJDK_TARGET_OS" = "xmacosx" || test "x$OPENJDK_TARGET_CPU" = "xppc64" ; then - # Why does macosx need more heap? Its the huge JDK batch. - ADD_JVM_ARG_IF_OK([-Xmx1600M],boot_jdk_jvmargs,[$JAVA]) - else - ADD_JVM_ARG_IF_OK([-Xmx1100M],boot_jdk_jvmargs,[$JAVA]) - fi - # When is adding -client something that speeds up the JVM? - # ADD_JVM_ARG_IF_OK([-client],boot_jdk_jvmargs,[$JAVA]) - ADD_JVM_ARG_IF_OK([-XX:PermSize=32m],boot_jdk_jvmargs,[$JAVA]) - ADD_JVM_ARG_IF_OK([-XX:MaxPermSize=160m],boot_jdk_jvmargs,[$JAVA]) - ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs,[$JAVA]) - # Disable special log output when a debug build is used as Boot JDK... - ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA]) + # Disable special log output when a debug build is used as Boot JDK... + ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA]) + + # Apply user provided options. + ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA]) + + AC_MSG_RESULT([$boot_jdk_jvmargs]) + + # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs + JAVA_FLAGS=$boot_jdk_jvmargs + AC_SUBST(JAVA_FLAGS) + + + AC_MSG_CHECKING([flags for boot jdk java command for big workloads]) + + # Starting amount of heap memory. + ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs_big,[$JAVA]) + + # Maximum amount of heap memory. + # Maximum stack size. + if test "x$BUILD_NUM_BITS" = x32; then + JVM_MAX_HEAP=1100M + STACK_SIZE=768 + else + # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit + # pointers are used. Apparently, we need to increase the heap and stack + # space for the jvm. 
More specifically, when running javac to build huge + # jdk batch + JVM_MAX_HEAP=1600M + STACK_SIZE=1536 fi + ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA]) + ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA]) + ADD_JVM_ARG_IF_OK([-XX:PermSize=32m],boot_jdk_jvmargs_big,[$JAVA]) + ADD_JVM_ARG_IF_OK([-XX:MaxPermSize=160m],boot_jdk_jvmargs_big,[$JAVA]) - AC_SUBST(BOOT_JDK_JVMARGS, $boot_jdk_jvmargs) + AC_MSG_RESULT([$boot_jdk_jvmargs_big]) + + JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big + AC_SUBST(JAVA_FLAGS_BIG) + + + AC_MSG_CHECKING([flags for boot jdk java command for small workloads]) + + # Use serial gc for small short lived tools if possible + ADD_JVM_ARG_IF_OK([-XX:+UseSerialGC],boot_jdk_jvmargs_small,[$JAVA]) + ADD_JVM_ARG_IF_OK([-Xms32M],boot_jdk_jvmargs_small,[$JAVA]) + ADD_JVM_ARG_IF_OK([-Xmx512M],boot_jdk_jvmargs_small,[$JAVA]) + + AC_MSG_RESULT([$boot_jdk_jvmargs_small]) + + JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small + AC_SUBST(JAVA_FLAGS_SMALL) ]) --- ./common/autoconf/generated-configure.sh Mon Dec 08 12:28:03 2014 -0800 +++ ./common/autoconf/generated-configure.sh Wed Feb 04 12:14:35 2015 -0800 @@ -786,7 +786,9 @@ JAXP_TOPDIR CORBA_TOPDIR LANGTOOLS_TOPDIR -BOOT_JDK_JVMARGS +JAVA_FLAGS_SMALL +JAVA_FLAGS_BIG +JAVA_FLAGS JAVAC_FLAGS BOOT_JDK_SOURCETARGET BOOT_JDK @@ -7928,7 +7930,7 @@ fi # Replace the commas with AND for use in the build directory name. - ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/'` + ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/g'` COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/' -e 's/core,/1/'` if test "x$COUNT_VARIANTS" != "x,1"; then BUILDING_MULTIPLE_JVM_VARIANTS=yes @@ -16086,117 +16088,10 @@ fi - if test "x$with_boot_jdk_jvmargs" = x; then - # Not all JVM:s accept the same arguments on the command line. - # OpenJDK specific increase in thread stack for JDK build, - # well more specifically, when running javac. - if test "x$BUILD_NUM_BITS" = x32; then - STACK_SIZE=768 - else - # Running Javac on a JVM on a 64-bit machine, the stack takes more space - # since 64-bit pointers are pushed on the stach. Apparently, we need - # to increase the stack space when javacing the JDK.... - STACK_SIZE=1536 - fi - - # Minimum amount of heap memory. - - $ECHO "Check if jvm arg is ok: -Xms64M" >&5 - $ECHO "Command: $JAVA -Xms64M -version" >&5 - OUTPUT=`$JAVA -Xms64M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -Xms64M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - if test "x$OPENJDK_TARGET_OS" = "xmacosx" || test "x$OPENJDK_TARGET_CPU" = "xppc64" ; then - # Why does macosx need more heap? Its the huge JDK batch. 
- - $ECHO "Check if jvm arg is ok: -Xmx1600M" >&5 - $ECHO "Command: $JAVA -Xmx1600M -version" >&5 - OUTPUT=`$JAVA -Xmx1600M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -Xmx1600M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - else - - $ECHO "Check if jvm arg is ok: -Xmx1100M" >&5 - $ECHO "Command: $JAVA -Xmx1100M -version" >&5 - OUTPUT=`$JAVA -Xmx1100M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -Xmx1100M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - # When is adding -client something that speeds up the JVM? - # ADD_JVM_ARG_IF_OK([-client],boot_jdk_jvmargs,[$JAVA]) - - $ECHO "Check if jvm arg is ok: -XX:PermSize=32m" >&5 - $ECHO "Command: $JAVA -XX:PermSize=32m -version" >&5 - OUTPUT=`$JAVA -XX:PermSize=32m -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:PermSize=32m" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - $ECHO "Check if jvm arg is ok: -XX:MaxPermSize=160m" >&5 - $ECHO "Command: $JAVA -XX:MaxPermSize=160m -version" >&5 - OUTPUT=`$JAVA -XX:MaxPermSize=160m -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:MaxPermSize=160m" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5 - $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5 - OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:ThreadStackSize=$STACK_SIZE" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - # Disable special log output when a debug build is used as Boot JDK... + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5 +$as_echo_n "checking flags for boot jdk java command ... " >&6; } + + # Disable special log output when a debug build is used as Boot JDK... $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5 $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5 @@ -16212,9 +16107,187 @@ JVM_ARG_OK=false fi - fi - - BOOT_JDK_JVMARGS=$boot_jdk_jvmargs + + # Apply user provided options. 
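The ADD_JVM_ARG_IF_OK expansions below all share one probe pattern: run the boot JDK's java binary with the candidate flag plus -version, and accept the flag only if the output still contains a version banner and no warning. A minimal standalone sketch of that pattern, assuming sh; probe_jvm_arg and accepted_args are illustrative names, not part of the build:

  # Probe one candidate flag against the boot JDK ($JAVA); keep it only if
  # "java -version" still prints a version banner and emits no warning.
  probe_jvm_arg() {
    candidate="$1"
    output=`"$JAVA" ${candidate} -version 2>&1`
    found_warn=`echo "$output" | grep -i warn`
    found_version=`echo $output | grep " version \""`
    if test "x$found_version" != x && test "x$found_warn" = x; then
      accepted_args="$accepted_args ${candidate}"
    fi
  }

  JAVA=/path/to/bootjdk/bin/java   # illustrative path
  probe_jvm_arg "-XX:+UseSerialGC"
  echo "accepted:${accepted_args}"
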
+ + $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5 + $ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5 + OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5 +$as_echo "$boot_jdk_jvmargs" >&6; } + + # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs + JAVA_FLAGS=$boot_jdk_jvmargs + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5 +$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; } + + # Starting amount of heap memory. + + $ECHO "Check if jvm arg is ok: -Xms64M" >&5 + $ECHO "Command: $JAVA -Xms64M -version" >&5 + OUTPUT=`$JAVA -Xms64M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + # Maximum amount of heap memory. + # Maximum stack size. + if test "x$BUILD_NUM_BITS" = x32; then + JVM_MAX_HEAP=1100M + STACK_SIZE=768 + else + # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit + # pointers are used. Apparently, we need to increase the heap and stack + # space for the jvm. More specifically, when running javac to build huge + # jdk batch + JVM_MAX_HEAP=1600M + STACK_SIZE=1536 + fi + + $ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5 + $ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5 + OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx$JVM_MAX_HEAP" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5 + $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5 + OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -XX:PermSize=32m" >&5 + $ECHO "Command: $JAVA -XX:PermSize=32m -version" >&5 + OUTPUT=`$JAVA -XX:PermSize=32m -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:PermSize=32m" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -XX:MaxPermSize=160m" >&5 + $ECHO "Command: $JAVA -XX:MaxPermSize=160m -version" >&5 + OUTPUT=`$JAVA -XX:MaxPermSize=160m -version 2>&1` + 
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:MaxPermSize=160m" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5 +$as_echo "$boot_jdk_jvmargs_big" >&6; } + + JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5 +$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; } + + # Use serial gc for small short lived tools if possible + + $ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5 + $ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5 + OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -Xms32M" >&5 + $ECHO "Command: $JAVA -Xms32M -version" >&5 + OUTPUT=`$JAVA -Xms32M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -Xmx512M" >&5 + $ECHO "Command: $JAVA -Xmx512M -version" >&5 + OUTPUT=`$JAVA -Xmx512M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5 +$as_echo "$boot_jdk_jvmargs_small" >&6; } + + JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small --- ./common/autoconf/jdk-options.m4 Mon Dec 08 12:28:03 2014 -0800 +++ ./common/autoconf/jdk-options.m4 Wed Feb 04 12:14:35 2015 -0800 @@ -134,7 +134,7 @@ fi # Replace the commas with AND for use in the build directory name. 
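The one-character change on the next two lines adds sed's global replacement flag: without /g only the first comma is rewritten, so a list of three or more variants would produce a malformed build directory name. A quick illustration (the variant list is just an example):

  echo "client,server,minimal1" | sed -e 's/,/AND/'
  # -> clientANDserver,minimal1
  echo "client,server,minimal1" | sed -e 's/,/AND/g'
  # -> clientANDserverANDminimal1
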
-  ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/'`
+  ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/g'`
   COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/' -e 's/core,/1/'`
   if test "x$COUNT_VARIANTS" != "x,1"; then
     BUILDING_MULTIPLE_JVM_VARIANTS=yes
--- ./common/autoconf/spec.gmk.in	Mon Dec 08 12:28:03 2014 -0800
+++ ./common/autoconf/spec.gmk.in	Wed Feb 04 12:14:35 2015 -0800
@@ -249,7 +249,6 @@
 # The boot jdk to use
 BOOT_JDK:=@BOOT_JDK@
-BOOT_JDK_JVMARGS:=@BOOT_JDK_JVMARGS@
 BOOT_RTJAR:=@BOOT_RTJAR@
 BOOT_TOOLSJAR=$(BOOT_JDK)/lib/tools.jar
@@ -436,9 +435,12 @@
 POST_STRIP_CMD:=@POST_STRIP_CMD@
 POST_MCS_CMD:=@POST_MCS_CMD@
-JAVA_FLAGS:=@BOOT_JDK_JVMARGS@
+JAVA_FLAGS:=@JAVA_FLAGS@
+JAVA_FLAGS_BIG:=@JAVA_FLAGS_BIG@
+JAVA_FLAGS_SMALL:=@JAVA_FLAGS_SMALL@
-JAVA=@FIXPATH@ $(BOOT_JDK)/bin/java $(JAVA_FLAGS)
+JAVA=@FIXPATH@ $(BOOT_JDK)/bin/java $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
+JAVA_SMALL=@FIXPATH@ $(BOOT_JDK)/bin/java $(JAVA_FLAGS_SMALL) $(JAVA_FLAGS)
 JAVAC=@FIXPATH@ $(BOOT_JDK)/bin/javac
 # Hotspot sets this variable before reading the SPEC when compiling sa-jdi.jar. Avoid
@@ -451,7 +453,7 @@
 RMIC=@FIXPATH@ $(BOOT_JDK)/bin/rmic
-NATIVE2ASCII=@FIXPATH@ $(BOOT_JDK)/bin/native2ascii
+NATIVE2ASCII=@FIXPATH@ $(BOOT_JDK)/bin/native2ascii $(addprefix -J, $(JAVA_FLAGS_SMALL))
 JARSIGNER=@FIXPATH@ $(BOOT_JDK)/bin/jarsigner
--- ./common/bin/hgforest.sh	Mon Dec 08 12:28:03 2014 -0800
+++ ./common/bin/hgforest.sh	Wed Feb 04 12:14:35 2015 -0800
@@ -1,7 +1,6 @@
 #!/bin/sh
-
 #
-# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -23,165 +22,375 @@
 # questions.
 #
-# Shell script for a fast parallel forest command
-command="$1"
-pull_extra_base="$2"
+# Shell script for a fast parallel forest/trees command
-if [ "" = "$command" ] ; then
-  echo No command to hg supplied!
-  exit 1
+usage() {
+  echo "usage: $0 [-h|--help] [-q|--quiet] [-v|--verbose] [-s|--sequential] [--] [commands...]" > ${status_output}
+  echo "Environment variables which modify behaviour:"
+  echo "  HGFOREST_QUIET      : (boolean) If 'true' then standard output is redirected to /dev/null"
+  echo "  HGFOREST_VERBOSE    : (boolean) If 'true' then Mercurial is asked to produce verbose output"
+  echo "  HGFOREST_SEQUENTIAL : (boolean) If 'true' then repos are processed sequentially. Disables concurrency"
+  echo "  HGFOREST_GLOBALOPTS : (string, must begin with space) Additional Mercurial global options"
+  echo "  HGFOREST_REDIRECT   : (file path) Redirect standard output to specified file"
+  echo "  HGFOREST_FIFOS      : (boolean) Default behaviour for FIFO detection. Cannot enable FIFOs where they are unusable"
+  echo "  HGFOREST_CONCURRENCY: (positive integer) Number of repos to process concurrently"
+  echo "  HGFOREST_DEBUG      : (boolean) If 'true' then temp files are retained"
+  exit 1
+}
+
+global_opts="${HGFOREST_GLOBALOPTS:-}"
+status_output="${HGFOREST_REDIRECT:-/dev/stdout}"
+qflag="${HGFOREST_QUIET:-false}"
+vflag="${HGFOREST_VERBOSE:-false}"
+sflag="${HGFOREST_SEQUENTIAL:-false}"
+while [ $# -gt 0 ]
+do
+  case $1 in
+    -h | --help )
+      usage
+      ;;
+
+    -q | --quiet )
+      qflag="true"
+      ;;
+
+    -v | --verbose )
+      vflag="true"
+      ;;
+
+    -s | --sequential )
+      sflag="true"
+      ;;
+
+    '--' ) # no more options
+      shift; break
+      ;;
+
+    -*) # bad option
+      usage
+      ;;
+
+    * ) # non option
+      break
+      ;;
+  esac
+  shift
+done
+
+# silence standard output?
+if [ ${qflag} = "true" ] ; then
+  global_opts="${global_opts} -q"
+  status_output="/dev/null"
+fi
+
+# verbose output?
+if [ ${vflag} = "true" ] ; then
+  global_opts="${global_opts} -v"
+fi
+
+# Make sure we have a command.
+if [ $# -lt 1 -o -z "${1:-}" ] ; then
+  echo "ERROR: No command to hg supplied!"
+  usage
+fi
+
+command="$1"; shift
+command_args="${@:-}"
+
 # Clean out the temporary directory that stores the pid files.
 tmp=/tmp/forest.$$
 rm -f -r ${tmp}
 mkdir -p ${tmp}
+
+if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
+  echo "DEBUG: temp files are in: ${tmp}"
+fi
+
+# Check if we can use fifos for monitoring sub-process completion.
+echo "1" > ${tmp}/read
+while_subshell=1
+while read line; do
+  while_subshell=0
+  break;
+done < ${tmp}/read
+rm ${tmp}/read
+
+on_windows=`uname -s | egrep -ic -e 'cygwin|msys'`
+
+if [ ${while_subshell} = "1" -o ${on_windows} = "1" ]; then
+  # cygwin has (2014-04-18) broken (single writer only) FIFOs
+  # msys has (2014-04-18) no FIFOs.
+  # older shells create a sub-shell for redirect to while
+  have_fifos="false"
+else
+  have_fifos="${HGFOREST_FIFOS:-true}"
+fi
+
 safe_interrupt () {
   if [ -d ${tmp} ]; then
     if [ "`ls ${tmp}/*.pid`" != "" ]; then
-      echo "Waiting for processes ( `cat ${tmp}/*.pid | tr '\n' ' '`) to terminate nicely!"
+      echo "Waiting for processes ( `cat ${tmp}/.*.pid ${tmp}/*.pid 2> /dev/null | tr '\n' ' '`) to terminate nicely!" > ${status_output}
       sleep 1
       # Pipe stderr to dev/null to silence kill, that complains when trying to kill
       # a subprocess that has already exited.
       kill -TERM `cat ${tmp}/*.pid | tr '\n' ' '` 2> /dev/null
       wait
-      echo Interrupt complete!
+      echo "Interrupt complete!"
> ${status_output} fi + rm -f -r ${tmp} fi - rm -f -r ${tmp} - exit 1 + exit 130 } nice_exit () { if [ -d ${tmp} ]; then - if [ "`ls ${tmp}`" != "" ]; then + if [ "`ls -A ${tmp} 2> /dev/null`" != "" ]; then wait fi + if [ "${HGFOREST_DEBUG:-false}" != "true" ] ; then + rm -f -r ${tmp} + fi fi - rm -f -r ${tmp} } trap 'safe_interrupt' INT QUIT trap 'nice_exit' EXIT +subrepos="corba jaxp jaxws langtools jdk hotspot nashorn" +subrepos_extra="jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs" + # Only look in specific locations for possible forests (avoids long searches) pull_default="" repos="" repos_extra="" -if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then - subrepos="corba jaxp jaxws langtools jdk hotspot nashorn" - if [ -f .hg/hgrc ] ; then - pull_default=`hg paths default` - if [ "${pull_default}" = "" ] ; then - echo "ERROR: Need initial clone with 'hg paths default' defined" - exit 1 - fi - fi - if [ "${pull_default}" = "" ] ; then - echo "ERROR: Need initial repository to use this script" +if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then + # we must be a clone + if [ ! -f .hg/hgrc ] ; then + echo "ERROR: Need initial repository to use this script" > ${status_output} exit 1 fi + + # the clone must know where it came from (have a default pull path). + pull_default=`hg paths default` + if [ "${pull_default}" = "" ] ; then + echo "ERROR: Need initial clone with 'hg paths default' defined" > ${status_output} + exit 1 + fi + + # determine which sub repos need to be cloned. for i in ${subrepos} ; do if [ ! -f ${i}/.hg/hgrc ] ; then repos="${repos} ${i}" fi done - if [ "${pull_extra_base}" != "" ] ; then - subrepos_extra="jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs" - pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'` - pull_extra="${pull_extra_base}/${pull_default_tail}" + + pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'` + + if [ -n "${command_args}" ] ; then + # if there is an "extra sources" path then reparent "extra" repos to that path + if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then + echo "ERROR: Need initial clone from non-local source" > ${status_output} + exit 1 + fi + pull_extra="${command_args}/${pull_default_tail}" + + # determine which extra subrepos need to be cloned. for i in ${subrepos_extra} ; do if [ ! -f ${i}/.hg/hgrc ] ; then repos_extra="${repos_extra} ${i}" fi done + else + if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then + # local source repo. Clone the "extra" subrepos that exist there. + for i in ${subrepos_extra} ; do + if [ -f ${pull_default}/${i}/.hg/hgrc -a ! -f ${i}/.hg/hgrc ] ; then + # sub-repo there in source but not here + repos_extra="${repos_extra} ${i}" + fi + done + fi fi - at_a_time=2 + # Any repos to deal with? if [ "${repos}" = "" -a "${repos_extra}" = "" ] ; then + echo "No repositories to process." > ${status_output} exit fi + + # Repos to process concurrently. Clone does better with low concurrency. + at_a_time="${HGFOREST_CONCURRENCY:-2}" else - hgdirs=`ls -d ./.hg ./*/.hg ./*/*/.hg ./*/*/*/.hg ./*/*/*/*/.hg 2>/dev/null` - # Derive repository names from the .hg directory locations - for i in ${hgdirs} ; do - repos="${repos} `echo ${i} | sed -e 's@/.hg$@@'`" + # Process command for all of the present repos + for i in . 
${subrepos} ${subrepos_extra} ; do + if [ -d ${i}/.hg ] ; then + repos="${repos} ${i}" + fi done + + # Any repos to deal with? + if [ "${repos}" = "" ] ; then + echo "No repositories to process." > ${status_output} + exit + fi + + # any of the repos locked? + locked="" for i in ${repos} ; do if [ -h ${i}/.hg/store/lock -o -f ${i}/.hg/store/lock ] ; then locked="${i} ${locked}" fi done - at_a_time=8 - # Any repos to deal with? - if [ "${repos}" = "" ] ; then - echo "No repositories to process." - exit + if [ "${locked}" != "" ] ; then + echo "ERROR: These repositories are locked: ${locked}" > ${status_output} + exit 1 fi - if [ "${locked}" != "" ] ; then - echo "These repositories are locked: ${locked}" - exit - fi + + # Repos to process concurrently. + at_a_time="${HGFOREST_CONCURRENCY:-8}" fi # Echo out what repositories we do a command on. -echo "# Repositories: ${repos} ${repos_extra}" -echo +echo "# Repositories: ${repos} ${repos_extra}" > ${status_output} -# Run the supplied command on all repos in parallel. -n=0 -for i in ${repos} ${repos_extra} ; do - n=`expr ${n} '+' 1` - repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'` - reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'` - pull_base="${pull_default}" - for j in $repos_extra ; do - if [ "$i" = "$j" ] ; then - pull_base="${pull_extra}" - fi - done +if [ "${command}" = "serve" ] ; then + # "serve" is run for all the repos as one command. ( ( - if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then - pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`" - echo hg clone ${pull_newrepo} ${i} - path="`dirname ${i}`" - if [ "${path}" != "." ] ; then - times=0 - while [ ! -d "${path}" ] ## nested repo, ensure containing dir exists - do - times=`expr ${times} '+' 1` - if [ `expr ${times} '%' 10` -eq 0 ] ; then - echo ${path} still not created, waiting... + cwd=`pwd` + serving=`basename ${cwd}` + ( + echo "[web]" + echo "description = ${serving}" + echo "allow_push = *" + echo "push_ssl = False" + + echo "[paths]" + for i in ${repos} ; do + if [ "${i}" != "." ] ; then + echo "/${serving}/${i} = ${i}" + else + echo "/${serving} = ${cwd}" + fi + done + ) > ${tmp}/serve.web-conf + + echo "serving root repo ${serving}" > ${status_output} + + echo "hg${global_opts} serve" > ${status_output} + (PYTHONUNBUFFERED=true hg${global_opts} serve -A ${status_output} -E ${status_output} --pid-file ${tmp}/serve.pid --web-conf ${tmp}/serve.web-conf; echo "$?" > ${tmp}/serve.pid.rc ) 2>&1 & + ) 2>&1 | sed -e "s@^@serve: @" > ${status_output} + ) & +else + # Run the supplied command on all repos in parallel. + + # n is the number of subprocess started or which might still be running. + n=0 + if [ ${have_fifos} = "true" ]; then + # if we have fifos use them to detect command completion. + mkfifo ${tmp}/fifo + exec 3<>${tmp}/fifo + fi + + # iterate over all of the subrepos. + for i in ${repos} ${repos_extra} ; do + n=`expr ${n} '+' 1` + repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'` + reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'` + pull_base="${pull_default}" + + # regular repo or "extra" repo? 
+ for j in ${repos_extra} ; do + if [ "${i}" = "${j}" ] ; then + # it's an "extra" + pull_base="${pull_extra}" + fi + done + + # remove trailing slash + pull_base="`echo ${pull_base} | sed -e 's@[/]*$@@'`" + + # execute the command on the subrepo + ( + ( + if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then + # some form of clone + clone_newrepo="${pull_base}/${i}" + parent_path="`dirname ${i}`" + if [ "${parent_path}" != "." ] ; then + times=0 + while [ ! -d "${parent_path}" ] ; do ## nested repo, ensure containing dir exists + if [ "${sflag}" = "true" ] ; then + # Missing parent is fatal during sequential operation. + echo "ERROR: Missing parent path: ${parent_path}" > ${status_output} + exit 1 + fi + times=`expr ${times} '+' 1` + if [ `expr ${times} '%' 10` -eq 0 ] ; then + echo "${parent_path} still not created, waiting..." > ${status_output} + fi + sleep 5 + done + fi + # run the clone command. + echo "hg${global_opts} clone ${clone_newrepo} ${i}" > ${status_output} + (PYTHONUNBUFFERED=true hg${global_opts} clone ${clone_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 & + else + # run the command. + echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output} + cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} ${command_args}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 & + fi + + echo $! > ${tmp}/${repopidfile}.pid + ) 2>&1 | sed -e "s@^@${reponame}: @" > ${status_output} + # tell the fifo waiter that this subprocess is done. + if [ ${have_fifos} = "true" ]; then + echo "${i}" >&3 + fi + ) & + + if [ "${sflag}" = "true" ] ; then + # complete this task before starting another. + wait + else + if [ "${have_fifos}" = "true" ]; then + # check on count of running subprocesses and possibly wait for completion + if [ ${n} -ge ${at_a_time} ] ; then + # read will block until there are completed subprocesses + while read repo_done; do + n=`expr ${n} '-' 1` + if [ ${n} -lt ${at_a_time} ] ; then + # we should start more subprocesses + break; fi - sleep 5 - done + done <&3 fi - (PYTHONUNBUFFERED=true hg clone ${pull_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc )& else - echo "cd ${i} && hg $*" - cd ${i} && (PYTHONUNBUFFERED=true hg "$@"; echo "$?" > ${tmp}/${repopidfile}.pid.rc )& + # Compare completions to starts + completed="`(ls -a1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`" + while [ `expr ${n} '-' ${completed}` -ge ${at_a_time} ] ; do + # sleep a short time to give time for something to complete + sleep 1 + completed="`(ls -a1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`" + done fi - echo $! > ${tmp}/${repopidfile}.pid - ) 2>&1 | sed -e "s@^@${reponame}: @") & + fi + done +fi - if [ `expr ${n} '%' ${at_a_time}` -eq 0 ] ; then - sleep 2 - echo Waiting 5 secs before spawning next background command. - sleep 3 - fi -done -# Wait for all hg commands to complete +# Wait for all subprocesses to complete wait # Terminate with exit 0 only if all subprocesses were successful ec=0 if [ -d ${tmp} ]; then - for rc in ${tmp}/*.pid.rc ; do + rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`" + for rc in ${rcfiles} ; do exit_code=`cat ${rc} | tr -d ' \n\r'` if [ "${exit_code}" != "0" ] ; then - echo "WARNING: ${rc} exited abnormally." 
+ repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`" + echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output} ec=1 fi done --- ./get_source.sh Mon Dec 08 12:28:03 2014 -0800 +++ ./get_source.sh Wed Feb 04 12:14:35 2015 -0800 @@ -1,7 +1,7 @@ #!/bin/sh # -# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -25,9 +25,76 @@ # questions. # -# Get clones of all nested repositories -sh ./common/bin/hgforest.sh clone "$@" || exit 1 +to_stderr() { + echo "$@" >&2 +} + +error() { + to_stderr "ERROR: $1" + exit ${2:-126} +} + +warning() { + to_stderr "WARNING: $1" +} + +version_field() { + # rev is typically omitted for minor and major releases + field=`echo ${1}.0 | cut -f ${2} -d .` + if expr 1 + $field >/dev/null 2> /dev/null; then + echo $field + else + echo -1 + fi +} + +# Version check + +# required +reqdmajor=1 +reqdminor=4 +reqdrev=0 + +# requested +rqstmajor=2 +rqstminor=6 +rqstrev=3 + + +# installed +hgwhere="`command -v hg`" +if [ "x$hgwhere" = "x" ]; then + error "Could not locate Mercurial command" +fi + +hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`" +if [ "x${hgversion}" = "x" ] ; then + error "Could not determine Mercurial version of $hgwhere" +fi + +hgmajor="`version_field $hgversion 1`" +hgminor="`version_field $hgversion 2`" +hgrev="`version_field $hgversion 3`" + +if [ $hgmajor -eq -1 -o $hgminor -eq -1 -o $hgrev -eq -1 ] ; then + error "Could not determine Mercurial version of $hgwhere from \"$hgversion\"" +fi + + +# Require +if [ $hgmajor -lt $reqdmajor -o \( $hgmajor -eq $reqdmajor -a $hgminor -lt $reqdminor \) -o \( $hgmajor -eq $reqdmajor -a $hgminor -eq $reqdminor -a $hgrev -lt $reqdrev \) ] ; then + error "Mercurial version $reqdmajor.$reqdminor.$reqdrev or later is required. $hgwhere is version $hgversion" +fi + + +# Request +if [ $hgmajor -lt $rqstmajor -o \( $hgmajor -eq $rqstmajor -a $hgminor -lt $rqstminor \) -o \( $hgmajor -eq $rqstmajor -a $hgminor -eq $rqstminor -a $hgrev -lt $rqstrev \) ] ; then + warning "Mercurial version $rqstmajor.$rqstminor.$rqstrev or later is recommended. $hgwhere is version $hgversion" +fi + + +# Get clones of all absent nested repositories (harmless if already exist) +sh ./common/bin/hgforest.sh clone "$@" || exit $? 
# Update all existing repositories to the latest sources sh ./common/bin/hgforest.sh pull -u - --- ./make/Javadoc.gmk Mon Dec 08 12:28:03 2014 -0800 +++ ./make/Javadoc.gmk Wed Feb 04 12:14:35 2015 -0800 @@ -70,6 +70,7 @@ SCTPAPI_FIRST_COPYRIGHT_YEAR = 2009 TRACING_FIRST_COPYRIGHT_YEAR = 2008 TREEAPI_FIRST_COPYRIGHT_YEAR = 2005 +NASHORNAPI_FIRST_COPYRIGHT_YEAR = 2014 JNLP_FIRST_COPYRIGHT_YEAR = 1998 PLUGIN2_FIRST_COPYRIGHT_YEAR = 2007 JDKNET_FIRST_COPYRIGHT_YEAR = 2014 @@ -113,10 +114,10 @@ # Url to devdocs page # Was: http://java.sun.com/javase/6/webnotes/devdocs-vs-specs.html -DEV_DOCS_URL-7 = http://download.oracle.com/javase/7/docs/index.html -DEV_DOCS_URL-8 = http://download.oracle.com/javase/8/docs/index.html +DEV_DOCS_URL-7 = https://docs.oracle.com/javase/7/docs/index.html +DEV_DOCS_URL-8 = https://docs.oracle.com/javase/8/docs/index.html DEV_DOCS_URL = $(DEV_DOCS_URL-$(JDK_MINOR_VERSION)) -DOCS_BASE_URL = http://download.oracle.com/javase/7/docs +DOCS_BASE_URL = https://docs.oracle.com/javase/7/docs # Common Java trademark line JAVA_TRADEMARK_LINE = Java is a trademark or registered trademark of \ @@ -137,6 +138,7 @@ $(JDK_OUTPUTDIR)/gendocsrc_rmic \ $(JDK_TOPDIR)/src/solaris/classes \ $(JDK_TOPDIR)/src/windows/classes \ + $(NASHORN_TOPDIR)/src/ \ $(JDK_SHARE_SRC)/doc/stub # List of directories that actually exist @@ -311,6 +313,13 @@ ################################################################# # +# Load custom Javadoc rules, if present +# +-include $(CUSTOM_MAKE_DIR)/Javadoc.gmk + +################################################################# + +# # Default target is same as docs target, create core api and all others it can # @@ -1095,6 +1104,60 @@ ############################################################# # +# nashornapidocs +# + +ALL_OTHER_TARGETS += nashornapidocs + +NASHORNAPI_DOCDIR := $(JDK_API_DOCSDIR)/nashorn +NASHORNAPI2COREAPI := ../$(JDKJRE2COREAPI) +NASHORNAPI_DOCTITLE := Nashorn API +NASHORNAPI_WINDOWTITLE := Nashorn API +NASHORNAPI_HEADER := Nashorn API +NASHORNAPI_BOTTOM := $(call CommonBottom,$(NASHORNAPI_FIRST_COPYRIGHT_YEAR)) +NASHORNAPI_GROUPNAME := Packages +NASHORNAPI_REGEXP := jdk.nashorn.api.scripting.* + +# NASHORNAPI_PKGS is located in NON_CORE_PKGS.gmk + +NASHORNAPI_INDEX_HTML = $(NASHORNAPI_DOCDIR)/index.html +NASHORNAPI_OPTIONS_FILE = $(DOCSTMPDIR)/nashornapi.options +NASHORNAPI_PACKAGES_FILE = $(DOCSTMPDIR)/nashornapi.packages + +nashornapidocs: $(NASHORNAPI_INDEX_HTML) + +# Set relative location to core api document root +$(NASHORNAPI_INDEX_HTML): GET2DOCSDIR=$(NASHORNAPI2COREAPI)/.. 
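The nashornapidocs rule that follows drives javadoc entirely through @-argument files: one generated file holds the options, the other the package list, so the recipe itself stays a single short command. Roughly what it expands to at build time; the output and temp paths here are illustrative:

  javadoc -d build/docs/api/nashorn \
      @build/docstmp/nashornapi.options \
      @build/docstmp/nashornapi.packages
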
+# Run javadoc if the index file is out of date or missing
+$(NASHORNAPI_INDEX_HTML): $(NASHORNAPI_OPTIONS_FILE) $(NASHORNAPI_PACKAGES_FILE) coredocs
+	$(prep-javadoc)
+	$(call JavadocSummary,$(NASHORNAPI_OPTIONS_FILE),$(NASHORNAPI_PACKAGES_FILE))
+	$(JAVADOC_CMD) -d $(@D) \
+	    @$(NASHORNAPI_OPTIONS_FILE) @$(NASHORNAPI_PACKAGES_FILE)
+
+# Create file with javadoc options in it
+$(NASHORNAPI_OPTIONS_FILE):
+	$(prep-target)
+	@($(call OptionOnly,$(COMMON_JAVADOCFLAGS)) ; \
+	  $(call OptionPair,-sourcepath,$(RELEASEDOCS_SOURCEPATH)) ; \
+	  $(call OptionPair,-encoding,ascii) ; \
+	  $(call OptionPair,-doctitle,$(NASHORNAPI_DOCTITLE)) ; \
+	  $(call OptionPair,-windowtitle,$(NASHORNAPI_WINDOWTITLE) $(DRAFT_WINTITLE)); \
+	  $(call OptionPair,-header,$(NASHORNAPI_HEADER)$(DRAFT_HEADER)) ; \
+	  $(call OptionPair,-tag,$(TAG_JLS)) ; \
+	  $(call OptionPair,-bottom,$(NASHORNAPI_BOTTOM)$(DRAFT_BOTTOM)) ; \
+	  $(call OptionTrip,-group,$(NASHORNAPI_GROUPNAME),$(NASHORNAPI_REGEXP)); \
+	  $(call OptionTrip,-linkoffline,$(NASHORNAPI2COREAPI),$(COREAPI_DOCSDIR)/); \
+	) >> $@
+
+# Create a file with the package names in it
+$(NASHORNAPI_PACKAGES_FILE): $(DIRECTORY_CACHE) $(call PackageDependencies,$(NASHORNAPI_PKGS))
+	$(prep-target)
+	$(call PackageFilter,$(NASHORNAPI_PKGS))
+
+#############################################################
+#
 # sctpdocs
 #
--- ./make/Jprt.gmk	Mon Dec 08 12:28:03 2014 -0800
+++ ./make/Jprt.gmk	Wed Feb 04 12:14:35 2015 -0800
@@ -23,155 +23,11 @@
 # questions.
 #
-# This file is included by the root NewerMakefile and contains targets
-# and utilities needed by JPRT.
+# This file contains targets and utilities needed by JPRT.
-# Utilities used in this Makefile. Most of this makefile executes without
-# the context of a spec file from configure.
-CAT=cat
-CMP=cmp
-CP=cp
-ECHO=echo
-MKDIR=mkdir
-PRINTF=printf
-PWD=pwd
-# Insure we have a path that looks like it came from pwd
-# (This is mostly for Windows sake and drive letters)
-define UnixPath # path
-$(shell (cd "$1" && $(PWD)))
-endef
-
-BUILD_DIR_ROOT:=$(root_dir)/build
-
-ifdef OPENJDK
-  OPEN_BUILD=true
-else
-  OPEN_BUILD := $(if $(or $(wildcard $(root_dir)/jdk/src/closed), \
-      $(wildcard $(root_dir)/jdk/make/closed), \
-      $(wildcard $(root_dir)/jdk/test/closed), \
-      $(wildcard $(root_dir)/hotspot/src/closed), \
-      $(wildcard $(root_dir)/hotspot/make/closed), \
-      $(wildcard $(root_dir)/hotspot/test/closed)), \
-      false,true)
-endif
-
-HOTSPOT_AVAILABLE := $(if $(wildcard $(root_dir)/hotspot),true,false)
-
-###########################################################################
-# To help in adoption of the new configure&&make build process, a bridge
-# build will use the old settings to run configure and do the build.
-
-# Build with the configure bridge. After running configure, restart make
-# to parse the new spec file.
-BRIDGE_TARGETS := all
-# Add bootcycle-images target if legacy variable is set.
-ifeq ($(SKIP_BOOT_CYCLE),false)
-  BRIDGE_TARGETS += bootcycle-images
-endif
-bridgeBuild: bridge2configure
-	@cd $(root_dir) && $(MAKE) -f Makefile $(BRIDGE_TARGETS)
-
-# Bridge from old Makefile ALT settings to configure options
-bridge2configure: $(BUILD_DIR_ROOT)/.bridge2configureOpts
-	bash ./configure $(strip $(shell $(CAT) $<))
-
-# Create a file with configure options created from old Makefile mechanisms.
-$(BUILD_DIR_ROOT)/.bridge2configureOpts: $(BUILD_DIR_ROOT)/.bridge2configureOptsLatest - $(RM) $@ - $(CP) $< $@ - -# Use this file to only change when obvious things have changed -$(BUILD_DIR_ROOT)/.bridge2configureOptsLatest: FRC - $(RM) $@.tmp - $(MKDIR) -p $(BUILD_DIR_ROOT) - @$(ECHO) " --with-debug-level=$(if $(DEBUG_LEVEL),$(DEBUG_LEVEL),release) " >> $@.tmp - ifdef ARCH_DATA_MODEL - @$(ECHO) " --with-target-bits=$(ARCH_DATA_MODEL) " >> $@.tmp - endif - ifeq ($(ARCH_DATA_MODEL),32) - @$(ECHO) " --with-jvm-variants=client,server " >> $@.tmp - endif - ifdef ALT_PARALLEL_COMPILE_JOBS - @$(ECHO) " --with-num-cores=$(ALT_PARALLEL_COMPILE_JOBS) " >> $@.tmp - endif - ifdef ALT_BOOTDIR - @$(ECHO) " --with-boot-jdk=$(call UnixPath,$(ALT_BOOTDIR)) " >> $@.tmp - endif - ifdef ALT_CUPS_HEADERS_PATH - @$(ECHO) " --with-cups-include=$(call UnixPath,$(ALT_CUPS_HEADERS_PATH)) " >> $@.tmp - endif - ifdef ALT_FREETYPE_HEADERS_PATH - @$(ECHO) " --with-freetype=$(call UnixPath,$(ALT_FREETYPE_HEADERS_PATH)/..) " >> $@.tmp - endif - ifdef ENABLE_SJAVAC - @$(ECHO) " --enable-sjavac" >> $@.tmp - endif - ifdef JDK_UPDATE_VERSION - @$(ECHO) " --with-update-version=$(JDK_UPDATE_VERSION)" >> $@.tmp - endif - ifeq ($(HOTSPOT_AVAILABLE),false) - ifdef ALT_JDK_IMPORT_PATH - @$(ECHO) " --with-import-hotspot=$(call UnixPath,$(ALT_JDK_IMPORT_PATH)) " >> $@.tmp - endif - endif - ifeq ($(OPEN_BUILD),true) - @$(ECHO) " --enable-openjdk-only " >> $@.tmp - else -# Todo: move to closed? - ifdef ALT_MOZILLA_HEADERS_PATH - @$(ECHO) " --with-mozilla-headers=$(call UnixPath,$(ALT_MOZILLA_HEADERS_PATH)) " >> $@.tmp - endif - ifdef ALT_JUNIT_DIR - @$(ECHO) " --with-junit-dir=$(call UnixPath,$(ALT_JUNIT_DIR)) " >> $@.tmp - endif - ifdef ANT_HOME - @$(ECHO) " --with-ant-home=$(call UnixPath,$(ANT_HOME)) " >> $@.tmp - endif - ifdef ALT_JAVAFX_ZIP_DIR - @$(ECHO) " --with-javafx-zip-dir=$(call UnixPath,$(ALT_JAVAFX_ZIP_DIR)) " >> $@.tmp - endif - ifdef ALT_JMC_ZIP_DIR - @$(ECHO) " --with-jmc-zip-dir=$(call UnixPath,$(ALT_JMC_ZIP_DIR)) " >> $@.tmp - endif - ifdef ALT_WIXDIR - @$(ECHO) " --with-wix=$(call UnixPath,$(ALT_WIXDIR)) " >> $@.tmp - endif - ifdef ALT_INSTALL_LZMA_PATH - @$(ECHO) " --with-lzma-path=$(call UnixPath,$(ALT_INSTALL_LZMA_PATH)) " >> $@.tmp - endif - ifdef ALT_INSTALL_UPX_PATH - @$(ECHO) " --with-upx-path=$(call UnixPath,$(ALT_INSTALL_UPX_PATH)) " >> $@.tmp - endif - ifdef ALT_INSTALL_UPX_FILENAME - @$(ECHO) " --with-upx-filename=$(call UnixPath,$(ALT_INSTALL_UPX_FILENAME)) " >> $@.tmp - endif ifdef ALT_BSDIFF_DIR @$(ECHO) " --with-bsdiff-dir=$(call UnixPath,$(ALT_BSDIFF_DIR)) " >> $@.tmp endif - ifdef ALT_CCSS_SIGNING_DIR - @$(ECHO) " --with-ccss-signing=$(call UnixPath,$(ALT_CCSS_SIGNING_DIR)) " >> $@.tmp - endif - ifdef ALT_SLASH_JAVA - @$(ECHO) " --with-java-devtools=$(call UnixPath,$(ALT_SLASH_JAVA)/devtools) " >> $@.tmp - endif - ifdef ALT_SPARKLE_FRAMEWORK_DIR - @$(ECHO) " --with-sparkle-framework=$(call UnixPath,$(ALT_SPARKLE_FRAMEWORK_DIR)) " >> $@.tmp - endif - endif - @if [ -f $@ ] ; then \ - if ! 
$(CMP) $@ $@.tmp > /dev/null ; then \ - $(CP) $@.tmp $@ ; \ - fi ; \ - else \ - $(CP) $@.tmp $@ ; \ - fi - $(RM) $@.tmp - -PHONY_LIST += bridge2configure bridgeBuild - -########################################################################### -# JPRT targets - ifndef JPRT_ARCHIVE_BUNDLE JPRT_ARCHIVE_BUNDLE=/tmp/jprt_bundles/j2sdk-image.zip endif @@ -179,22 +35,9 @@ JPRT_ARCHIVE_INSTALL_BUNDLE=/tmp/jprt_bundles/product-install.zip endif -# These targets execute in a SPEC free context, before calling bridgeBuild -# to generate the SPEC. -jprt_build_product: DEBUG_LEVEL=release -jprt_build_product: BUILD_DIRNAME=*-release -jprt_build_product: jprt_build_generic - -jprt_build_fastdebug: DEBUG_LEVEL=fastdebug -jprt_build_fastdebug: BUILD_DIRNAME=*-fastdebug -jprt_build_fastdebug: jprt_build_generic - -jprt_build_debug: DEBUG_LEVEL=slowdebug -jprt_build_debug: BUILD_DIRNAME=*-debug -jprt_build_debug: jprt_build_generic - -jprt_build_generic: BRIDGE_TARGETS+=jprt_bundle -jprt_build_generic: bridgeBuild +ifeq ($(SKIP_BOOT_CYCLE), false) + jprt_bundle: bootcycle-images +endif # This target must be called in the context of a SPEC file jprt_bundle: $(JPRT_ARCHIVE_BUNDLE) @@ -241,14 +84,6 @@ @$(call TargetExit) -# Keep track of phony targets -PHONY_LIST += jprt_build_product jprt_build_fastdebug jprt_build_debug \ - jprt_build_generic bundles jprt_bundle \ - final-images final-images-only - ########################################################################### # Phony targets -.PHONY: $(PHONY_LIST) - -# Force target -FRC: +.PHONY: jprt_bundle bundles bundles-only final-images final-images-only --- ./make/Main.gmk Mon Dec 08 12:28:03 2014 -0800 +++ ./make/Main.gmk Wed Feb 04 12:14:35 2015 -0800 @@ -242,4 +242,6 @@ .PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-docs clean-test clean-overlay-images clean-bootcycle-build .PHONY: profiles profiles-only profiles-oscheck +include $(root_dir)/make/Jprt.gmk + FRC: # Force target --- ./make/MakeHelpers.gmk Mon Dec 08 12:28:03 2014 -0800 +++ ./make/MakeHelpers.gmk Wed Feb 04 12:14:35 2015 -0800 @@ -50,7 +50,7 @@ # Global targets are possible to run either with or without a SPEC. The prototypical # global target is "help". -global_targets=help jprt% bridgeBuild bridge2configure +global_targets=help ############################## # Functions --- ./make/common/NON_CORE_PKGS.gmk Mon Dec 08 12:28:03 2014 -0800 +++ ./make/common/NON_CORE_PKGS.gmk Wed Feb 04 12:14:35 2015 -0800 @@ -83,6 +83,8 @@ com.sun.source.util \ jdk +NASHORNAPI_PKGS = jdk.nashorn.api.scripting + SMARTCARDIO_PKGS = javax.smartcardio SCTPAPI_PKGS = com.sun.nio.sctp @@ -95,7 +97,8 @@ endif JDK_PKGS = jdk \ - jdk.net + jdk.net \ + jdk.management.cmm # non-core packages in rt.jar NON_CORE_PKGS = $(DOMAPI_PKGS) \ --- ./make/jprt.properties Mon Dec 08 12:28:03 2014 -0800 +++ ./make/jprt.properties Wed Feb 04 12:14:35 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -23,132 +23,467 @@ # questions. 
# -# Properties for jprt +############## +# +# Global settings +# # The current release name -my.jdk.update.version=25 +my.jdk.update.version=40 jprt.tools.default.release=jdk8u${my.jdk.update.version} -# Unix toolkit to use for building on windows -jprt.windows.jdk8.build.unix.toolkit=cygwin +# Check if this is the equivalent of a hotspot push job +# Interpret -testset hotspot to mean exactly that +my.is.hotspot.job.hotspot=true +my.is.hotspot.job=${my.is.hotspot.job.${jprt.test.set}} -# The different build flavors we want, we override here so we just get these 2 -jprt.build.flavors=product,fastdebug - -# Standard list of jprt build targets for this source tree -jprt.build.targets= \ - solaris_sparcv9_5.10-{product|fastdebug}, \ - solaris_x64_5.10-{product|fastdebug}, \ - linux_i586_2.6-{product|fastdebug}, \ - linux_x64_2.6-{product|fastdebug}, \ - macosx_x64_10.7-{product|fastdebug}, \ - windows_i586_6.1-{product|fastdebug}, \ - windows_x64_6.1-{product|fastdebug} - -# User can select the test set with jprt submit "-testset name" option -jprt.my.test.set=${jprt.test.set} - -# Test target list (no fastdebug & limited c2 testing) -jprt.my.test.target.set= \ - solaris_sparcv9_5.10-product-c2-TESTNAME, \ - solaris_x64_5.10-product-c2-TESTNAME, \ - linux_i586_2.6-product-{c1|c2}-TESTNAME, \ - linux_x64_2.6-product-c2-TESTNAME, \ - macosx_x64_10.7-product-c2-TESTNAME, \ - windows_i586_6.1-product-c1-TESTNAME, \ - windows_x64_6.1-product-c2-TESTNAME - -# Default vm test targets (testset=default) -jprt.vm.default.test.targets= \ - ${jprt.my.test.target.set:TESTNAME=jvm98}, \ - ${jprt.my.test.target.set:TESTNAME=scimark} - -# Default jdk test targets (testset=default) -jprt.make.rule.default.test.targets= \ - ${jprt.my.test.target.set:TESTNAME=langtools_jtreg}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_lang}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_math}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_util} - - -# Default vm test targets (testset=core) -jprt.vm.core.test.targets= - -# Core jdk test targets (testset=core) -jprt.make.rule.core.test.targets= \ - ${jprt.my.test.target.set:TESTNAME=jdk_lang}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_math}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_util}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_io}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_net}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_nio}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_security1}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_security2}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_security3}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_rmi}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_text}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_time}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_other}, \ - ${jprt.my.test.target.set:TESTNAME=core_tools} - -# Svc vm test targets (testset=svc) -jprt.vm.svc.test.targets= - -# Core jdk test targets (testset=svc) -jprt.make.rule.svc.test.targets= \ - ${jprt.my.test.target.set:TESTNAME=jdk_management}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_instrument}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_jmx}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_jdi}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_jfr}, \ - ${jprt.my.test.target.set:TESTNAME=svc_tools} - -# All vm test targets (testset=all) -jprt.vm.all.test.targets= \ - ${jprt.vm.default.test.targets}, \ - ${jprt.my.test.target.set:TESTNAME=runThese}, \ - ${jprt.my.test.target.set:TESTNAME=jbb_default} - -# All jdk test targets (testset=all) -jprt.make.rule.all.test.targets= \ - 
${jprt.make.rule.core.test.targets}, \ - ${jprt.make.rule.svc.test.targets}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_awt}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_beans1}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_beans2}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_beans3}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_sound}, \ - ${jprt.my.test.target.set:TESTNAME=jdk_swing} - -# PIT vm test targets (testset=pit) -jprt.vm.pit.test.targets= \ - ${jprt.vm.all.test.targets} - -# PIT jdk test targets (testset=pit) -jprt.make.rule.pit.test.targets= \ - ${jprt.my.test.target.set:TESTNAME=langtools_jtreg}, \ - ${jprt.make.rule.core.test.targets}, \ - ${jprt.make.rule.svc.test.targets} - -# JCK test targets in test/Makefile (no windows) -jprt.my.jck.test.target.set= \ - solaris_sparcv9_5.10-product-c2-JCK7TESTRULE, \ - solaris_x64_5.10-product-c2-JCK7TESTRULE, \ - linux_i586_2.6-product-c1-JCK7TESTRULE, \ - linux_x64_2.6-product-c2-JCK7TESTRULE - -# JCK testset targets (e.g. jprt submit -testset jck ... ) -jprt.make.rule.jck.test.targets= \ - ${jprt.my.jck.test.target.set:JCK7TESTRULE=jck7devtools}, \ - ${jprt.my.jck.test.target.set:JCK7TESTRULE=jck7runtime}, \ - ${jprt.my.jck.test.target.set:JCK7TESTRULE=jck7compiler} - -# Select list to use (allow for testset to be empty too) -jprt.make.rule..test.targets=${jprt.make.rule.default.test.targets} -jprt.make.rule.test.targets=${jprt.make.rule.${jprt.my.test.set}.test.targets} -jprt.vm..test.targets=${jprt.vm.default.test.targets} -jprt.test.targets=${jprt.vm.${jprt.my.test.set}.test.targets} +# Disable syncing the source after builds and tests are done +jprt.sync.push=${my.is.hotspot.job ? false : true} # Directories to be excluded from the source bundles jprt.bundle.exclude.src.dirs=build dist webrev +# Use configure when building +jprt.build.use.configure=true + +# Set make target to use for different build flavors +jprt.build.flavor.debugOpen.target=jprt_bundle +jprt.build.flavor.fastdebug.target=jprt_bundle +jprt.build.flavor.product.target=jprt_bundle +jprt.build.flavor.productOpen.target=jprt_bundle + +# Use these configure args to define debug level +jprt.debug.build.configure.args=--with-debug-level=slowdebug +jprt.fastdebug.build.configure.args=--with-debug-level=fastdebug +jprt.product.build.configure.args=--with-debug-level=release +jprt.debugOpen.build.configure.args=${jprt.debug.build.configure.args} --enable-openjdk-only +jprt.fastdebugOpen.build.configure.args=${jprt.fastdebug.build.configure.args} --enable-openjdk-only +jprt.productOpen.build.configure.args=${jprt.product.build.configure.args} --enable-openjdk-only + +# Select build flavors and build targets +jprt.build.flavors=${my.is.hotspot.job ? ${my.build.flavors.hotspot} : ${my.build.flavors.default}} +jprt.build.targets=${my.is.hotspot.job ? 
${my.build.targets.hotspot} : ${my.build.targets.default}} + +# Select test targets - jprt default for jprt.test.set is "default" +jprt.test.targets=${my.test.targets.${jprt.test.set}} +jprt.make.rule.test.targets=${my.make.rule.test.targets.${jprt.test.set}} + +# 7155453: Work-around to prevent popups on OSX from blocking test completion +# but the work-around is added to all platforms to be consistent +jprt.jbb.options=-Djava.awt.headless=true + +######## +# +# Build options (generic) +# + +# Configure args common to all builds +# Also allows for additional, testset specific configure arguments to be set +jprt.build.configure.args= \ + --with-boot-jdk=$ALT_BOOTDIR \ + --with-jobs=$ALT_PARALLEL_COMPILE_JOBS \ + --with-update-version=${my.jdk.update.version} \ + ${my.additional.build.configure.args.${jprt.test.set}} + +# i586 is often cross-compiled from 64-bit machines, so need to set target bits explicitly +jprt.i586.build.configure.args= \ + --with-target-bits=32 + +# i586 platforms have both client and server, but to allow for overriding the exact configuration +# on a per-build flavor basis the value is set for the individual build flavors +# All other platforms only build server, which is the default setting from configure +my.i586.default.build.configure.args= \ + --with-jvm-variants=client,server +jprt.i586.debug.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.debug.build.configure.args} +jprt.i586.fastdebug.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.fastdebug.build.configure.args} +jprt.i586.product.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.product.build.configure.args} +jprt.i586.debugOpen.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.debugOpen.build.configure.args} +jprt.i586.fastdebugOpen.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.fastdebugOpen.build.configure.args} +jprt.i586.productOpen.build.configure.args= \ + ${my.i586.default.build.configure.args} \ + ${jprt.productOpen.build.configure.args} + +######## +# +# Build targets and options (default/jdk) +# + +# The default build flavors +my.build.flavors.default=fastdebug,product + +# Standard list of jprt build targets for this source tree +my.build.targets.default= \ + solaris_sparcv9_5.10-{product|fastdebug}, \ + solaris_x64_5.10-{product|fastdebug}, \ + linux_i586_2.6-{product|fastdebug}, \ + linux_x64_2.6-{product|fastdebug}, \ + macosx_x64_10.7-{product|fastdebug}, \ + windows_i586_6.1-{product|fastdebug}, \ + windows_x64_6.1-{product|fastdebug} + +# Test target list (no fastdebug & limited c2 testing) +my.test.target.set= \ + solaris_sparcv9_5.10-product-c2-TESTNAME, \ + solaris_x64_5.10-product-c2-TESTNAME, \ + linux_i586_2.6-product-{c1|c2}-TESTNAME, \ + linux_x64_2.6-product-c2-TESTNAME, \ + macosx_x64_10.7-product-c2-TESTNAME, \ + windows_i586_6.1-product-c1-TESTNAME, \ + windows_x64_6.1-product-c2-TESTNAME + +# Default vm test targets (testset=default) +my.test.targets.default= \ + ${my.test.target.set:TESTNAME=jvm98}, \ + ${my.test.target.set:TESTNAME=scimark} + +# Default jdk test targets (testset=default) +my.make.rule.test.targets.default= \ + ${my.test.target.set:TESTNAME=langtools_jtreg}, \ + ${my.test.target.set:TESTNAME=jdk_lang}, \ + ${my.test.target.set:TESTNAME=jdk_math}, \ + ${my.test.target.set:TESTNAME=jdk_util} + +# Default vm test targets (testset=core) +my.test.targets.core= + +# Core jdk test targets (testset=core) 
+my.make.rule.test.targets.core= \
+    ${my.test.target.set:TESTNAME=jdk_lang}, \
+    ${my.test.target.set:TESTNAME=jdk_math}, \
+    ${my.test.target.set:TESTNAME=jdk_util}, \
+    ${my.test.target.set:TESTNAME=jdk_io}, \
+    ${my.test.target.set:TESTNAME=jdk_net}, \
+    ${my.test.target.set:TESTNAME=jdk_nio}, \
+    ${my.test.target.set:TESTNAME=jdk_security1}, \
+    ${my.test.target.set:TESTNAME=jdk_security2}, \
+    ${my.test.target.set:TESTNAME=jdk_security3}, \
+    ${my.test.target.set:TESTNAME=jdk_security4}, \
+    ${my.test.target.set:TESTNAME=jdk_rmi}, \
+    ${my.test.target.set:TESTNAME=jdk_text}, \
+    ${my.test.target.set:TESTNAME=jdk_time}, \
+    ${my.test.target.set:TESTNAME=jdk_other}, \
+    ${my.test.target.set:TESTNAME=core_tools}
+
+# Svc vm test targets (testset=svc)
+my.test.targets.svc=
+
+# Svc jdk test targets (testset=svc)
+my.make.rule.test.targets.svc= \
+    ${my.test.target.set:TESTNAME=jdk_management}, \
+    ${my.test.target.set:TESTNAME=jdk_instrument}, \
+    ${my.test.target.set:TESTNAME=jdk_jmx}, \
+    ${my.test.target.set:TESTNAME=jdk_jdi}, \
+    ${my.test.target.set:TESTNAME=jdk_jfr}, \
+    ${my.test.target.set:TESTNAME=jdk_rm}, \
+    ${my.test.target.set:TESTNAME=svc_tools}
+
+# All vm test targets (testset=all)
+my.test.targets.all= \
+    ${my.test.targets.default}, \
+    ${my.test.target.set:TESTNAME=runThese}, \
+    ${my.test.target.set:TESTNAME=jbb_default}
+
+# All jdk test targets (testset=all)
+my.make.rule.test.targets.all= \
+    ${my.make.rule.test.targets.core}, \
+    ${my.make.rule.test.targets.svc}, \
+    ${my.test.target.set:TESTNAME=jdk_awt}, \
+    ${my.test.target.set:TESTNAME=jdk_beans1}, \
+    ${my.test.target.set:TESTNAME=jdk_beans2}, \
+    ${my.test.target.set:TESTNAME=jdk_beans3}, \
+    ${my.test.target.set:TESTNAME=jdk_sound}, \
+    ${my.test.target.set:TESTNAME=jdk_swing}
+
+# PIT vm test targets (testset=pit)
+my.test.targets.pit= \
+    ${my.test.targets.all}
+
+# PIT jdk test targets (testset=pit)
+my.make.rule.test.targets.pit= \
+    ${my.test.target.set:TESTNAME=langtools_jtreg}, \
+    ${my.make.rule.test.targets.core}, \
+    ${my.make.rule.test.targets.svc}
+
+# JCK test targets in test/Makefile (no windows)
+my.test.target.set.jck= \
+    solaris_sparcv9_5.10-product-c2-JCK7TESTRULE, \
+    solaris_x64_5.10-product-c2-JCK7TESTRULE, \
+    linux_i586_2.6-product-c1-JCK7TESTRULE, \
+    linux_x64_2.6-product-c2-JCK7TESTRULE
+
+# JCK testset targets
+my.make.rule.test.targets.jck= \
+    ${my.test.target.set.jck:JCK7TESTRULE=jck7devtools}, \
+    ${my.test.target.set.jck:JCK7TESTRULE=jck7runtime}, \
+    ${my.test.target.set.jck:JCK7TESTRULE=jck7compiler}
+
+
+#############
+#
+# Hotspot related settings (testset=hotspot)
+#
+
+# The hotspot build flavors
+my.build.flavors.hotspot= \
+    debugOpen,fastdebug,product,productOpen, \
+    ${my.additional.build.flavors.hotspot}
+
+# Platforms built for hotspot push jobs
+my.build.targets.hotspot= \
+    solaris_sparcv9_5.10-{product|fastdebug|optimized}, \
+    solaris_x64_5.10-{product|fastdebug}, \
+    linux_i586_2.6-{product|fastdebug}, \
+    linux_x64_2.6-{product|fastdebug|optimized}, \
+    macosx_x64_10.7-{product|fastdebug}, \
+    windows_i586_6.1-{product|fastdebug}, \
+    windows_x64_6.1-{product|fastdebug|optimized}, \
+    solaris_x64_5.10-{debugOpen}, \
+    linux_x64_2.6-{productOpen}, \
+    ${my.additional.build.targets.hotspot}
+
+# Tests to run on the various platforms for hotspot push jobs
+my.test.targets.hotspot.solaris.sparcv9= \
+    solaris_sparcv9_5.10-{product|fastdebug}-c2-jvm98, \
+    solaris_sparcv9_5.10-{product|fastdebug}-c2-jvm98_nontiered, \
+    
solaris_sparcv9_5.10-{product|fastdebug}-c2-scimark, \ + solaris_sparcv9_5.10-product-c2-runThese, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_SerialGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_ParNewGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_CMS, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_G1, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCBasher_ParOldGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_SerialGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_ParallelGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_ParNewGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_CMS, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_G1, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-GCOld_ParOldGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_default_nontiered, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_SerialGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_ParallelGC, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_CMS, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_G1, \ + solaris_sparcv9_5.10-{product|fastdebug}-c2-jbb_ParOldGC + +my.test.targets.hotspot.solaris.x64= \ + solaris_x64_5.10-{product|fastdebug}-c2-jvm98, \ + solaris_x64_5.10-{product|fastdebug}-c2-jvm98_nontiered, \ + solaris_x64_5.10-{product|fastdebug}-c2-scimark, \ + solaris_x64_5.10-product-c2-runThese, \ + solaris_x64_5.10-product-c2-runThese_Xcomp, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_SerialGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_ParNewGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_CMS, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_G1, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCBasher_ParOldGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_SerialGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_ParallelGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_ParNewGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_CMS, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_G1, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_ParOldGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-jbb_default_nontiered, \ + solaris_x64_5.10-{product|fastdebug}-c2-jbb_SerialGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-jbb_ParallelGC, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_CMS, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_G1, \ + solaris_x64_5.10-{product|fastdebug}-c2-GCOld_ParOldGC + +my.test.targets.hotspot.linux.i586= \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-jvm98, \ + linux_i586_2.6-{product|fastdebug}-c2-jvm98_nontiered, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-scimark, \ + linux_i586_2.6-product-c1-runThese_Xcomp, \ + linux_i586_2.6-fastdebug-c1-runThese_Xshare, \ + linux_i586_2.6-fastdebug-c2-runThese_Xcomp, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ + linux_i586_2.6-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ + linux_i586_2.6-product-{c1|c2}-GCOld_SerialGC, \ + linux_i586_2.6-product-{c1|c2}-GCOld_ParallelGC, \ + linux_i586_2.6-product-{c1|c2}-GCOld_ParNewGC, \ + linux_i586_2.6-product-{c1|c2}-GCOld_CMS, \ + 
linux_i586_2.6-product-{c1|c2}-GCOld_G1, \ + linux_i586_2.6-product-{c1|c2}-GCOld_ParOldGC, \ + linux_i586_2.6-{product|fastdebug}-c1-jbb_SerialGC, \ + linux_i586_2.6-{product|fastdebug}-c2-jbb_default_nontiered, \ + linux_i586_2.6-{product|fastdebug}-c1-jbb_ParallelGC, \ + linux_i586_2.6-{product|fastdebug}-c1-jbb_CMS, \ + linux_i586_2.6-{product|fastdebug}-c1-jbb_G1, \ + linux_i586_2.6-{product|fastdebug}-c1-jbb_ParOldGC + +my.test.targets.hotspot.linux.x64= \ + linux_x64_2.6-{product|fastdebug}-c2-jvm98, \ + linux_x64_2.6-{product|fastdebug}-c2-jvm98_nontiered, \ + linux_x64_2.6-{product|fastdebug}-c2-scimark, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_SerialGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_ParNewGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_CMS, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_G1, \ + linux_x64_2.6-{product|fastdebug}-c2-GCBasher_ParOldGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_SerialGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_ParallelGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_ParNewGC, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_CMS, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_G1, \ + linux_x64_2.6-{product|fastdebug}-c2-GCOld_ParOldGC, \ + linux_x64_2.6-{product|fastdebug}-c2-jbb_default_nontiered, \ + linux_x64_2.6-{product|fastdebug}-c2-jbb_ParallelGC, \ + linux_x64_2.6-{product|fastdebug}-c2-jbb_G1, \ + linux_x64_2.6-{product|fastdebug}-c2-jbb_ParOldGC + +my.test.targets.hotspot.macosx.x64= \ + macosx_x64_10.7-{product|fastdebug}-c2-jvm98, \ + macosx_x64_10.7-{product|fastdebug}-c2-jvm98_nontiered, \ + macosx_x64_10.7-{product|fastdebug}-c2-scimark, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_SerialGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_ParNewGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_CMS, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_G1, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCBasher_ParOldGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_SerialGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_ParallelGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_ParNewGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_CMS, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_G1, \ + macosx_x64_10.7-{product|fastdebug}-c2-GCOld_ParOldGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-jbb_default_nontiered, \ + macosx_x64_10.7-{product|fastdebug}-c2-jbb_ParallelGC, \ + macosx_x64_10.7-{product|fastdebug}-c2-jbb_G1, \ + macosx_x64_10.7-{product|fastdebug}-c2-jbb_ParOldGC + +my.test.targets.hotspot.windows.i586= \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-jvm98, \ + windows_i586_6.1-{product|fastdebug}-c2-jvm98_nontiered, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-scimark, \ + windows_i586_6.1-product-{c1|c2}-runThese, \ + windows_i586_6.1-product-{c1|c2}-runThese_Xcomp, \ + windows_i586_6.1-fastdebug-c1-runThese_Xshare, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ + windows_i586_6.1-product-{c1|c2}-GCOld_SerialGC, \ + windows_i586_6.1-product-{c1|c2}-GCOld_ParallelGC, \ + 
windows_i586_6.1-product-{c1|c2}-GCOld_ParNewGC, \ + windows_i586_6.1-product-{c1|c2}-GCOld_CMS, \ + windows_i586_6.1-product-{c1|c2}-GCOld_G1, \ + windows_i586_6.1-product-{c1|c2}-GCOld_ParOldGC, \ + windows_i586_6.1-{product|fastdebug}-{c1|c2}-jbb_default, \ + windows_i586_6.1-{product|fastdebug}-c2-jbb_default_nontiered, \ + windows_i586_6.1-product-{c1|c2}-jbb_ParallelGC, \ + windows_i586_6.1-product-{c1|c2}-jbb_CMS, \ + windows_i586_6.1-product-{c1|c2}-jbb_G1, \ + windows_i586_6.1-product-{c1|c2}-jbb_ParOldGC + +my.test.targets.hotspot.windows.x64= \ + windows_x64_6.1-{product|fastdebug}-c2-jvm98, \ + windows_x64_6.1-{product|fastdebug}-c2-jvm98_nontiered, \ + windows_x64_6.1-{product|fastdebug}-c2-scimark, \ + windows_x64_6.1-product-c2-runThese, \ + windows_x64_6.1-product-c2-runThese_Xcomp, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_SerialGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_ParNewGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_CMS, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_G1, \ + windows_x64_6.1-{product|fastdebug}-c2-GCBasher_ParOldGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_SerialGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_ParallelGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_ParNewGC, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_CMS, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_G1, \ + windows_x64_6.1-{product|fastdebug}-c2-GCOld_ParOldGC, \ + windows_x64_6.1-{product|fastdebug}-c2-jbb_default, \ + windows_x64_6.1-{product|fastdebug}-c2-jbb_default_nontiered, \ + windows_x64_6.1-product-c2-jbb_CMS, \ + windows_x64_6.1-product-c2-jbb_ParallelGC, \ + windows_x64_6.1-product-c2-jbb_G1, \ + windows_x64_6.1-product-c2-jbb_ParOldGC + +# Some basic "smoke" tests for OpenJDK builds +my.test.targets.hotspot.open= \ + solaris_x64_5.10-{productOpen|fastdebugOpen}-c2-jvm98, \ + linux_x64_2.6-{productOpen|fastdebugOpen}-c2-jvm98 + +# The complete list of test targets for jprt +my.test.targets.hotspot= \ + ${my.test.targets.hotspot.open}, \ + ${my.test.targets.hotspot.solaris.sparcv9}, \ + ${my.test.targets.hotspot.solaris.x64}, \ + ${my.test.targets.hotspot.linux.i586}, \ + ${my.test.targets.hotspot.linux.x64}, \ + ${my.test.targets.hotspot.macosx.x64}, \ + ${my.test.targets.hotspot.windows.i586}, \ + ${my.test.targets.hotspot.windows.x64}, \ + ${my.test.targets.hotspot.solaris.sparcv9}, \ + ${my.test.targets.hotspot.solaris.x64}, \ + ${my.test.targets.hotspot.linux.x64}, \ + ${my.test.targets.hotspot.windows.i586}, \ + ${my.test.targets.hotspot.windows.x64}, \ + ${my.additional.test.targets.hotspot} + + +# Make file based test targets + +my.make.rule.test.targets.hotspot.clienttests= \ + linux_i586_2.6-*-c1-hotspot_clienttest, \ + windows_i586_6.1-*-c1-hotspot_clienttest + +my.make.rule.test.targets.hotspot.servertests= \ + solaris_sparcv9_5.10-*-c2-hotspot_servertest, \ + solaris_x64_5.10-*-c2-hotspot_servertest, \ + linux_i586_2.6-*-c2-hotspot_servertest, \ + linux_x64_2.6-*-c2-hotspot_servertest, \ + macosx_x64_10.7-*-c2-hotspot_servertest, \ + windows_i586_6.1-*-c2-hotspot_servertest, \ + windows_x64_6.1-*-c2-hotspot_servertest + +my.make.rule.test.targets.hotspot.internalvmtests= \ + solaris_sparcv9_5.10-fastdebug-c2-hotspot_internalvmtests, \ + solaris_x64_5.10-fastdebug-c2-hotspot_internalvmtests, \ + linux_i586_2.6-fastdebug-c2-hotspot_internalvmtests, \ + linux_x64_2.6-fastdebug-c2-hotspot_internalvmtests, \ + 
macosx_x64_10.7-fastdebug-c2-hotspot_internalvmtests, \
+    windows_i586_6.1-fastdebug-c2-hotspot_internalvmtests, \
+    windows_x64_6.1-fastdebug-c2-hotspot_internalvmtests
+
+my.make.rule.test.targets.hotspot.reg.group= \
+    solaris_sparcv9_5.10-{product|fastdebug}-c2-GROUP, \
+    solaris_x64_5.10-{product|fastdebug}-c2-GROUP, \
+    linux_i586_2.6-{product|fastdebug}-c2-GROUP, \
+    linux_x64_2.6-{product|fastdebug}-c2-GROUP, \
+    windows_i586_6.1-{product|fastdebug}-c2-GROUP, \
+    windows_x64_6.1-{product|fastdebug}-c2-GROUP, \
+    linux_i586_2.6-{product|fastdebug}-c1-GROUP, \
+    windows_i586_6.1-{product|fastdebug}-c1-GROUP
+
+my.make.rule.test.targets.hotspot= \
+    ${my.make.rule.test.targets.hotspot.clienttests}, \
+    ${my.make.rule.test.targets.hotspot.servertests}, \
+    ${my.make.rule.test.targets.hotspot.internalvmtests}, \
+    ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_wbapitest}, \
+    ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler}, \
+    ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_gc}, \
+    ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_runtime}, \
+    ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_serviceability}, \
+    ${my.additional.make.rule.test.targets.hotspot}
--- ./test/Makefile Mon Dec 08 12:28:03 2014 -0800
+++ ./test/Makefile Wed Feb 04 12:14:35 2015 -0800
@@ -33,6 +33,7 @@
# This makefile depends on the availability of sibling directories.
LANGTOOLS_DIR=$(TOPDIR)/langtools
JDK_DIR=$(TOPDIR)/jdk
+HOTSPOT_DIR=$(TOPDIR)/hotspot
# Macro to run a test target in a subdir
define SUBDIR_TEST # subdirectory target
@@ -62,6 +63,9 @@
jdk_% core_% svc_%:
    @$(NO_STOPPING)$(call SUBDIR_TEST, $(JDK_DIR), TEST="$@" $@)
+hotspot_%:
+    @$(NO_STOPPING)$(call SUBDIR_TEST, $(HOTSPOT_DIR), TEST="$@" $@)
+
################################################################
# Phony targets (i.e. these are not filenames)
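The jprt.properties target matrices above are built almost entirely out of two substitution forms: plain ${name} references and ${name:TOKEN=value}, which expands the named list and then stamps a value over a placeholder token such as TESTNAME, GROUP or JCK7TESTRULE. A minimal Java sketch of that expansion semantics, inferred from how the properties are used here; PropertyExpander and expand() are made-up names, not jprt code, and un-nested ${...} references are assumed:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of jprt-style property expansion, inferred from the
// usage in the properties above; not the actual jprt implementation.
public class PropertyExpander {
    private final Map<String, String> props = new HashMap<>();

    void put(String key, String value) { props.put(key, value); }

    // Expands ${name} and ${name:TOKEN=value} references in a raw value.
    // Assumes references are not nested inside each other.
    String expand(String raw) {
        StringBuilder out = new StringBuilder();
        int i = 0;
        while (i < raw.length()) {
            int start = raw.indexOf("${", i);
            if (start < 0) { out.append(raw.substring(i)); break; }
            int end = raw.indexOf('}', start);
            out.append(raw, i, start);
            String ref = raw.substring(start + 2, end);
            int colon = ref.indexOf(':');
            if (colon < 0) {
                // ${name}: substitute the (recursively expanded) value
                out.append(expand(props.getOrDefault(ref, "")));
            } else {
                // ${name:TOKEN=value}: expand the named list, then stamp the
                // value over every occurrence of the placeholder token
                String base = expand(props.getOrDefault(ref.substring(0, colon), ""));
                String[] kv = ref.substring(colon + 1).split("=", 2);
                out.append(base.replace(kv[0], kv[1]));
            }
            i = end + 1;
        }
        return out.toString();
    }

    public static void main(String[] args) {
        PropertyExpander p = new PropertyExpander();
        p.put("my.test.target.set", "linux_x64_2.6-product-c2-TESTNAME, windows_x64_6.1-product-c2-TESTNAME");
        System.out.println(p.expand("${my.test.target.set:TESTNAME=jdk_lang}"));
        // prints: linux_x64_2.6-product-c2-jdk_lang, windows_x64_6.1-product-c2-jdk_lang
    }
}

This is what lets one shared my.test.target.set list feed the default, core, svc, all, pit and jck testsets: each testset only decides which TESTNAME (or JCK7TESTRULE) values to stamp into it.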
--- ./corba/.hgtags Mon Dec 08 12:28:12 2014 -0800
+++ ./corba/.hgtags Wed Feb 04 12:14:39 2015 -0800
@@ -347,3 +347,29 @@
474bf60980443dfae2fe6e121fef0caea4e014b3 jdk8u31-b10
7e2056eba0b62247407e065f3f88a89358fc26a6 jdk8u31-b11
285b0e589c50e46ca7ad3434221335901a547d66 jdk8u31-b12
+f89b454638d89ee5f44422b7a5b8e5651260e68f jdk8u31-b13
+7d1e0f0b63f1d66c77924d8b2a1accdf8f7480db jdk8u40-b00
+c5d9822a3c18cd9e274dfe99e91c33e02bd8f8f4 jdk8u40-b01
+504b4455570e14b7fc0a837a09c6401c603516d9 jdk8u40-b02
+91cd0ecdbe08eadf09ee89e64bf45360da8f2413 jdk8u40-b03
+f3969243c71a0f1c90f312ac755faad1deff3412 jdk8u40-b04
+740fea207f7045ab8ccc790867657d5c03a99eec jdk8u40-b05
+ced787f7545f26c3b1fdd35119ff41aa79fe6e03 jdk8u40-b06
+0d09cb188d39b5abb759977b9020ef957a0374ed jdk8u40-b07
+8d4971881c6661c51276ddb84b8e314b37d8d742 jdk8u40-b08
+bf87d71911661f583b9300795c9472179b4b3506 jdk8u40-b09
+a6d92ff8b962af32e2a9d66e956ff885d4c574b1 jdk8u40-b10
+a6dd66098734f58bc0bf847e2ca6f9b335689b9c jdk8u40-b11
+f5c47f0074b4822486c97709503b8885078c80f0 jdk8u40-b12
+7f8454e2f02fc296661a083dd4373d582980a86f jdk8u40-b13
+d36513f2cf934eb2f6f665a259cbbf2af82930c0 jdk8u40-b14
+46bf2b74baf4fc2f27b4c9238f8da94b7fcb8fe5 jdk8u40-b15
+0d3a452720a58bc203869ad606a4ea346c14c291 jdk8u40-b16
+bff1a326ac97c543b9c271adebc9deeda974edb1 jdk8u40-b17
+a1e2c13de84e00f2aedf4c40e96347306ede84f3 jdk8u40-b18
+8bbc2bb414b7e9331c2014c230553d72c9d161c5 jdk8u40-b19
+445eceffc829e205037098115c26e38e85ea5f7c jdk8u40-b20
+9c54cc92c0beb29179abbce272d3f5c8ba4ffd0e jdk8u40-b21
+4c7421f74674ebefb8e91eba59ab2a2db8c1abd7 jdk8u40-b22
+62f7faef5ed956cd481cae6216b22fdb4b6e3e46 jdk8u40-b23
+472aa5bae0e78614e873d56bcc31e7caba49963c jdk8u40-b24
--- ./corba/THIRD_PARTY_README Mon Dec 08 12:28:12 2014 -0800
+++ ./corba/THIRD_PARTY_README Wed Feb 04 12:14:39 2015 -0800
@@ -3385,7 +3385,7 @@
included with JRE 8, JDK 8, and OpenJDK 8.
    Apache Commons Math 3.2
-   Apache Derby 10.10.1.3
+   Apache Derby 10.11.1.2
    Apache Jakarta BCEL 5.1
    Apache Jakarta Regexp 1.4
    Apache Santuario XML Security for Java 1.5.4
--- ./corba/src/share/classes/com/sun/corba/se/impl/io/OutputStreamHook.java Mon Dec 08 12:28:12 2014 -0800
+++ ./corba/src/share/classes/com/sun/corba/se/impl/io/OutputStreamHook.java Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@
import java.io.OutputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectOutput;
-import java.util.Hashtable;
+import java.util.Map;
+import java.util.HashMap;
import org.omg.CORBA.INTERNAL;
@@ -49,7 +50,7 @@
     */
    private class HookPutFields extends ObjectOutputStream.PutField {
-       private Hashtable fields = new Hashtable();
+       private Map fields = new HashMap<>();
        /**
         * Put the value of the named boolean field into the persistent field.
@@ -140,7 +141,6 @@ public OutputStreamHook() throws java.io.IOException { super(); - } public void defaultWriteObject() throws IOException { --- ./hotspot/.hgtags Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/.hgtags Wed Feb 04 12:14:39 2015 -0800 @@ -503,6 +503,30 @@ eaa4074a7e3975cd33ec55e6b584586e2ac681bd jdk8u20-b26 7c9925f21c2529a88eb64b8039cc080f60b85e01 jdk8u20-b31 7edb04063a423e278fe34a0006d25fee198f495e jdk8u20-b32 +4828415ebbf11e205dcc08e97ad5ae7dd03522f9 jdk8u40-b00 +d952af8cf67dd1e7ab5fec9a299c6c6dafd1863e hs25.40-b01 +f0afba33c928ddaa2d5f003b90d683c143f78ea3 hs25.40-b02 +e2976043eac37c8036f6a6dfa454787f64fa3f56 hs25.40-b03 +cb95655ef06fece507bbc2792474411ab2e899ab hs25.40-b04 +dc06b830ea95ed953cac02e9e67a75ab682edb97 jdk8u40-b01 +897333c7e5874625bd26d09fdaf242196024e9c2 hs25.40-b05 +f52cb91647590fe4a12af295a8a87e2cb761b044 jdk8u40-b02 +fbc31318922c31488c0464ccd864d2cd1d9e21a7 hs25.40-b06 +38539608359a6dfc5740abb66f878af643757c3b jdk8u40-b03 +c3990b8c710e4c1996b5cd579681645d9f0408c1 hs25.40-b07 +3f1b3f2dd1cb224747a11a6788e58b5cb7683d57 hs25.40-b08 +fd4dbaff30027832dd21bcc7171ddb466ca2924f jdk8u40-b04 +c9635cad4a5d794a96b4a26d3e7ad1d783133add hs25.40-b09 +232b50b20797424c64da115ca48db131b1489ac1 jdk8u40-b05 +47ec483b936ee8cd2b26752e0aba3d5e6caab393 hs25.40-b10 +3702eb6ec7086186211ab7763a44c68fc7a898eb jdk8u40-b06 +4489ac5b084aae8e2a80b71ff98d8e8acc3bf290 hs25.40-b11 +b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07 +5c1b5be2c69bcae610a790e9438da446c61d3361 hs25.40-b12 +905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08 +d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13 +7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09 +e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02 @@ -536,3 +560,30 @@ d7b6bdd51abe68b16411d5b292fb830a43c5bc09 jdk8u31-b10 9906d432d6dbd2cda242e3f3cfde7cf6c90245bf jdk8u31-b11 e13839545238d1ecf17f0489bb6fb765de46719a jdk8u31-b12 +4206e725d584be942c25ff46ff23d8e299ca4a4c jdk8u31-b13 +1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10 +f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15 +99372b2fee0eb8b3452f47230e84aa6e97003184 jdk8u40-b11 +8b9ec2da541a74ac698560b6a2bc45fccb789919 hs25.40-b16 +6b93bf9ea3ea57ed0fe53cfedb2f9ab912c324e5 jdk8u40-b12 +521e269ae1daa9df1cb0835b97aa76bdf340fcb2 hs25.40-b17 +86307d47790785398d0695acc361bccaefe25f94 jdk8u40-b13 +4d5dc0d0f8799fafa1135d51d85edd4edd566501 hs25.40-b18 +b8ca8ec1daea70f7c0d519e866f9f147ec247055 jdk8u40-b14 +eb16b24e2eba9bdf04a9b377bebc2db9f713ff5e jdk8u40-b15 +3a8a0fd171c5876023112941b1c7254262f9adfc hs25.40-b19 +aa2442f89230dc46147c721812f3b3bd4c612e83 hs25.40-b20 +5ea68fb91139081304357f9b937f32c5fdfeca6d jdk8u40-b16 +6bf89bfe8185747a57193efb6cec1f17ccc80414 hs25.40-b21 +fc1f9b67fd8c5d5cd94ecc03569d93e7ce7fb574 jdk8u40-b17 +bc5a90a4db47f1c497d7894434c42325f595cd02 hs25.40-b22 +31d3306aad29e39929418ed43f28212a5f5306a3 jdk8u40-b18 +f8fc5cbe082ce0fb0c6c1dcd39493a16ed916353 hs25.40-b23 +d9349fa8822336e0244da0a8448f3e6b2d62741d jdk8u40-b19 +c3933f52eeb33f70ee562464edddfe9f01d944fd jdk8u40-b20 +d2e9a6bec4f2eec8506eed16f7324992a85d8480 hs25.40-b24 +25ec4a67433744bbe3406e5069e7fd1876ebbf2f jdk8u40-b21 +0f0cb4eeab2d871274f4ffdcd6017d2fdfa89238 hs25.40-b25 +0ee548a1cda08c884eccd563e2d5fdb6ee769b5a jdk8u40-b22 +0e67683b700174eab71ea205d1cfa4f1cf4523ba jdk8u40-b23 +fa4e797f61e6dda1a60e06944018213bff2a1b76 jdk8u40-b24 --- ./hotspot/THIRD_PARTY_README 
Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/THIRD_PARTY_README Wed Feb 04 12:14:39 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8. Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,40 @@ +package sun.jvm.hotspot.gc_implementation.g1; + +import java.util.Observable; +import java.util.Observer; + +import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.runtime.VM; +import sun.jvm.hotspot.runtime.VMObject; +import sun.jvm.hotspot.types.CIntegerField; +import sun.jvm.hotspot.types.Type; +import sun.jvm.hotspot.types.TypeDataBase; + +public class G1Allocator extends VMObject { + + //size_t _summary_bytes_used; + static private CIntegerField summaryBytesUsedField; + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + static private synchronized void initialize(TypeDataBase db) { + Type type = db.lookupType("G1Allocator"); + + summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used"); + } + + public long getSummaryBytes() { + return summaryBytesUsedField.getValue(addr); + } + + public G1Allocator(Address addr) { + super(addr); + + } +} --- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Wed Feb 04 12:14:39 2015 -0800 @@ -36,19 +36,18 @@ import sun.jvm.hotspot.runtime.VM; import sun.jvm.hotspot.runtime.VMObjectFactory; import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.CIntegerField; import sun.jvm.hotspot.types.Type; import sun.jvm.hotspot.types.TypeDataBase; // Mirror class for G1CollectedHeap. 
public class G1CollectedHeap extends SharedHeap {
-   // HeapRegionSeq _seq;
-   static private long hrsFieldOffset;
-   // MemRegion _g1_committed;
-   static private long g1CommittedFieldOffset;
-   // size_t _summary_bytes_used;
-   static private CIntegerField summaryBytesUsedField;
+   // HeapRegionManager _hrm;
+   static private long hrmFieldOffset;
+   // MemRegion _g1_reserved;
+   static private long g1ReservedFieldOffset;
+   // G1Allocator* _allocator
+   static private AddressField g1Allocator;
    // G1MonitoringSupport* _g1mm;
    static private AddressField g1mmField;
    // HeapRegionSet _old_set;
@@ -67,32 +66,29 @@
    static private synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("G1CollectedHeap");
-       hrsFieldOffset = type.getField("_hrs").getOffset();
-       g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
-       summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+       hrmFieldOffset = type.getField("_hrm").getOffset();
+       g1Allocator = type.getAddressField("_allocator");
        g1mmField = type.getAddressField("_g1mm");
        oldSetFieldOffset = type.getField("_old_set").getOffset();
        humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
    }
    public long capacity() {
-       Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-       MemRegion g1Committed = new MemRegion(g1CommittedAddr);
-       return g1Committed.byteSize();
+       return hrm().capacity();
    }
    public long used() {
-       return summaryBytesUsedField.getValue(addr);
+       return allocator().getSummaryBytes();
    }
    public long n_regions() {
-       return hrs().length();
+       return hrm().length();
    }
-   private HeapRegionSeq hrs() {
-       Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
-       return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
-                                                        hrsAddr);
+   private HeapRegionManager hrm() {
+       Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
+       return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
+                                                            hrmAddr);
    }
    public G1MonitoringSupport g1mm() {
@@ -100,6 +96,11 @@
        return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
    }
+   public G1Allocator allocator() {
+       Address g1AllocatorAddr = g1Allocator.getValue(addr);
+       return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+   }
+
    public HeapRegionSetBase oldSet() {
        Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
        return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
@@ -113,7 +114,7 @@
    }
    private Iterator heapRegionIterator() {
-       return hrs().heapRegionIterator();
+       return hrm().heapRegionIterator();
    }
    public void heapRegionIterate(SpaceClosure scl) {
--- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java Wed Feb 04 12:14:39 2015 -0800
@@ -93,19 +93,35 @@
    private class HeapRegionIterator implements Iterator {
        private long index;
        private long length;
+       private HeapRegion next;
+
+       public HeapRegion positionToNext() {
+           HeapRegion result = next;
+           while (index < length && at(index) == null) {
+               index++;
+           }
+           if (index < length) {
+               next = at(index);
+               index++; // restart search at next element
+           } else {
+               next = null;
+           }
+           return result;
+       }
        @Override
-       public boolean hasNext() { return index < length; }
+       public boolean hasNext() { return next != null; }
        @Override
-       public HeapRegion next() { return at(index++); }
+       public HeapRegion next() { return positionToNext(); }
        @Override
-       public void remove() { /* not supported */ }
+       public void remove() { /* not supported */ }
-       HeapRegionIterator(long committedLength) {
+       HeapRegionIterator(long totalLength) {
            index = 0;
-           length = committedLength;
+           length = totalLength;
+           positionToNext();
        }
    }
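The HeapRegionIterator rewrite above is needed because the region table is now indexed over the whole reserved range and can contain null entries for uncommitted regions; the old plain index walk would have handed those nulls to callers. The new shape is a classic look-ahead iterator that pre-positions on the next non-null element. A self-contained rendering of the same pattern over a plain array (SparseArrayIterator is an illustrative name, not an SA class):

import java.util.Iterator;
import java.util.NoSuchElementException;

// Look-ahead iterator that skips null slots, mirroring the patched
// HeapRegionIterator: the constructor primes 'next', and each call to
// next() returns the element found earlier while searching for its successor.
public class SparseArrayIterator<T> implements Iterator<T> {
    private final T[] table;
    private int index;
    private T next;

    public SparseArrayIterator(T[] table) {
        this.table = table;
        positionToNext(); // prime the first element, as the patch does
    }

    private T positionToNext() {
        T result = next;
        while (index < table.length && table[index] == null) {
            index++; // skip holes (uncommitted regions in the SA case)
        }
        if (index < table.length) {
            next = table[index];
            index++; // restart the search at the following slot
        } else {
            next = null;
        }
        return result;
    }

    @Override
    public boolean hasNext() { return next != null; }

    @Override
    public T next() {
        if (next == null) throw new NoSuchElementException();
        return positionToNext();
    }

    @Override
    public void remove() { throw new UnsupportedOperationException(); }

    public static void main(String[] args) {
        String[] table = { null, "region0", null, null, "region1" };
        Iterator<String> it = new SparseArrayIterator<>(table);
        while (it.hasNext()) {
            System.out.println(it.next()); // region0, then region1
        }
    }
}

Note that hasNext() is now a pure query on the prefetched element, so it stays cheap even when large stretches of the table are uncommitted.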
--- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java Wed Feb 04 12:14:39 2015 -0800
@@ -24,23 +24,26 @@
package sun.jvm.hotspot.gc_implementation.g1;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Observable;
import java.util.Observer;
-
import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.memory.CompactibleSpace;
+import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegion. Currently we don't actually include
-// any of its fields but only iterate over it (which we get "for free"
-// as HeapRegion ultimately inherits from ContiguousSpace).
+// any of its fields but only iterate over it.
-public class HeapRegion extends ContiguousSpace {
+public class HeapRegion extends CompactibleSpace {
    // static int GrainBytes;
    static private CIntegerField grainBytesField;
+   static private AddressField topField;
    static {
        VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,8 @@
        Type type = db.lookupType("HeapRegion");
        grainBytesField = type.getCIntegerField("GrainBytes");
+       topField = type.getAddressField("_top");
+
    }
    static public long grainBytes() {
@@ -63,4 +68,25 @@
    public HeapRegion(Address addr) {
        super(addr);
    }
+
+   public Address top() {
+       return topField.getValue(addr);
+   }
+
+   @Override
+   public List getLiveRegions() {
+       List res = new ArrayList();
+       res.add(new MemRegion(bottom(), top()));
+       return res;
+   }
+
+   @Override
+   public long used() {
+       return top().minus(bottom());
+   }
+
+   @Override
+   public long free() {
+       return end().minus(top());
+   }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionManager.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionManager.
+
+public class HeapRegionManager extends VMObject {
+    // G1HeapRegionTable _regions
+    static private long regionsFieldOffset;
+    // uint _num_committed
+    static private CIntegerField numCommittedField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+            public void update(Observable o, Object data) {
+                initialize(VM.getVM().getTypeDataBase());
+            }
+        });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionManager");
+
+        regionsFieldOffset = type.getField("_regions").getOffset();
+        numCommittedField = type.getCIntegerField("_num_committed");
+    }
+
+    private G1HeapRegionTable regions() {
+        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
+        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
+                                                             regionsAddr);
+    }
+
+    public long capacity() {
+        return length() * HeapRegion.grainBytes();
+    }
+
+    public long length() {
+        return regions().length();
+    }
+
+    public long committedLength() {
+        return numCommittedField.getValue(addr);
+    }
+
+    public Iterator heapRegionIterator() {
+        return regions().heapRegionIterator(length());
+    }
+
+    public HeapRegionManager(Address addr) {
+        super(addr);
+    }
+}
--- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Mon Dec 08 12:28:35 2014 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc_implementation.g1;
-
-import java.util.Iterator;
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.runtime.VMObjectFactory;
-import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
-
-public class HeapRegionSeq extends VMObject {
-    // G1HeapRegionTable _regions
-    static private long regionsFieldOffset;
-    // uint _committed_length
-    static private CIntegerField committedLengthField;
-
-    static {
-        VM.registerVMInitializedObserver(new Observer() {
-            public void update(Observable o, Object data) {
-                initialize(VM.getVM().getTypeDataBase());
-            }
-        });
-    }
-
-    static private synchronized void initialize(TypeDataBase db) {
-        Type type = db.lookupType("HeapRegionSeq");
-
-        regionsFieldOffset = type.getField("_regions").getOffset();
-        committedLengthField = type.getCIntegerField("_committed_length");
-    }
-
-    private G1HeapRegionTable regions() {
-        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
-        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
-                                                             regionsAddr);
-    }
-
-    public long length() {
-        return regions().length();
-    }
-
-    public long committedLength() {
-        return committedLengthField.getValue(addr);
-    }
-
-    public Iterator heapRegionIterator() {
-        return regions().heapRegionIterator(committedLength());
-    }
-
-    public HeapRegionSeq(Address addr) {
-        super(addr);
-    }
-}
--- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Wed Feb 04 12:14:39 2015 -0800
@@ -152,7 +152,7 @@
    private long indexOffset(long index) {
        if (Assert.ASSERTS_ENABLED) {
-           Assert.that(index > 0 && index < getLength(), "invalid cp index " + index + " " + getLength());
+           Assert.that(index >= 0 && index < getLength(), "invalid cp index " + index + " " + getLength());
        }
        return (index * getElementSize()) + headerSize;
    }
--- ./hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Wed Feb 04 12:14:39 2015 -0800
@@ -98,11 +98,14 @@
            break;
        default: throw new IllegalArgumentException();
        }
+
        if (cpCache == null) {
            return (short) cpCacheIndex;
        } else if (fmt.indexOf("JJJJ") >= 0) {
-           // change byte-ordering and go via secondary cache entry
-           throw new InternalError("unimplemented");
+           // Invokedynamic requires special handling
+           cpCacheIndex = ~cpCacheIndex;
+           cpCacheIndex = bytes.swapInt(cpCacheIndex);
+           return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex();
        } else if (fmt.indexOf("JJ") >= 0) {
            // change byte-ordering and go via cache
            return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
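The new "JJJJ" branch above replaces the old unimplemented InternalError. An invokedynamic instruction carries a four-byte operand that HotSpot rewrites into a constant pool cache index stored in native byte order and, to keep it distinguishable from an ordinary constant pool index, as its bitwise complement; the agent therefore complements the operand and byte-swaps it before looking up the cache entry. A round-trip sketch of that encoding, assuming a little-endian target where the SA's bytes.swapInt() actually swaps (IndyOperand, encode() and decode() are illustrative names, not HotSpot code):

// Round-trip model of the invokedynamic operand decoding added to
// ByteCodeRewriter; Integer.reverseBytes stands in for bytes.swapInt().
public class IndyOperand {
    static int swap32(int v) { return Integer.reverseBytes(v); }

    // What the VM conceptually stores in the rewritten bytecode stream:
    // the cp-cache index, bitwise-complemented, in native byte order.
    static int encode(int cpCacheIndex) {
        return swap32(~cpCacheIndex);
    }

    // What the agent does to recover the index, in the same order as the
    // patch: complement first, then undo the byte order.
    static int decode(int operand) {
        int v = ~operand;
        return swap32(v);
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(42))); // prints 42
    }
}

Because the complement works on each byte independently, it commutes with the byte swap, so the complement-then-swap order used in the patch and the swap-then-complement order are equivalent.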
--- ./hotspot/make/Makefile Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/Makefile Wed Feb 04 12:14:39 2015 -0800
@@ -95,6 +95,7 @@
COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product
COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug
COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug
+COMMON_VM_OPTIMIZED_TARGETS=optimized optimized1 docs export_optimized
# JDK directory list
JDK_DIRS=bin include jre lib demo
@@ -111,20 +112,21 @@
all_product: product1 docs export_product
all_fastdebug: fastdebug1 docs export_fastdebug
all_debug: debug1 docs export_debug
+all_optimized: optimized1 docs export_optimized
else
ifeq ($(MACOSX_UNIVERSAL),true)
all_product: universal_product
all_fastdebug: universal_fastdebug
all_debug: universal_debug
+all_optimized: universal_optimized
else
all_product: $(COMMON_VM_PRODUCT_TARGETS)
all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
all_debug: $(COMMON_VM_DEBUG_TARGETS)
+all_optimized: $(COMMON_VM_OPTIMIZED_TARGETS)
endif
endif
-all_optimized: optimized optimized1 docs export_optimized
-
allzero: all_productzero all_fastdebugzero
all_productzero: productzero docs export_product
all_fastdebugzero: fastdebugzero docs export_fastdebug
@@ -302,7 +304,7 @@
export_product_jdk::
    $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
export_optimized_jdk::
-   $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
+   $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
export_fastdebug_jdk::
    $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
export_debug_jdk::
@@ -686,6 +688,19 @@
    ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
    fi
+copy_optimized_jdk::
+   $(RM) -r $(JDK_IMAGE_DIR)/optimized
+   $(MKDIR) -p $(JDK_IMAGE_DIR)/optimized
+   if [ -d $(JDK_IMPORT_PATH)/optimized ] ; then \
+   ($(CD) $(JDK_IMPORT_PATH)/optimized && \
+   $(TAR) -cf - $(JDK_DIRS)) | \
+   ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+   else \
+   ($(CD) $(JDK_IMPORT_PATH) && \
+   $(TAR) -cf - $(JDK_DIRS)) | \
+   ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+   fi
+
#
# Check target
#
--- ./hotspot/make/aix/makefiles/adjust-mflags.sh Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/aix/makefiles/adjust-mflags.sh Wed Feb 04 12:14:39 2015 -0800
@@ -1,6 +1,6 @@
#! /bin/sh
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
    echo "$MFLAGS" \
    | sed '
        s/^-/ -/
-       s/ -\([^ ][^ ]*\)j/ -\1 -j/
+       s/ -\([^ I][^ I]*\)j/ -\1 -j/
        s/ -j[0-9][0-9]*/ -j/
        s/ -j\([^ ]\)/ -j -\1/
        s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- ./hotspot/make/aix/makefiles/fastdebug.make Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/aix/makefiles/fastdebug.make Wed Feb 04 12:14:39 2015 -0800
@@ -67,7 +67,6 @@
# not justified.
LFLAGS_QIPA=
-G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG
PICFLAGS = DEFAULT
--- ./hotspot/make/aix/makefiles/xlc.make Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/aix/makefiles/xlc.make Wed Feb 04 12:14:39 2015 -0800
@@ -74,6 +74,12 @@
# no xlc counterpart for -fcheck-new
# CFLAGS += -fcheck-new
+# We need to define this on the command line if we want to use the
+# predefined format specifiers from "inttypes.h". Otherwise system headers
+# can indirectly include inttypes.h before we define __STDC_FORMAT_MACROS
+# in globalDefinitions.hpp
+CFLAGS += -D__STDC_FORMAT_MACROS
+
ARCHFLAG = -q64
CFLAGS += $(ARCHFLAG)
--- ./hotspot/make/bsd/makefiles/adjust-mflags.sh Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/bsd/makefiles/adjust-mflags.sh Wed Feb 04 12:14:39 2015 -0800
@@ -64,7 +64,7 @@
    echo "$MFLAGS" \
    | sed '
        s/^-/ -/
-       s/ -\([^ ][^ ]*\)j/ -\1 -j/
+       s/ -\([^ I][^ I]*\)j/ -\1 -j/
        s/ -j[0-9][0-9]*/ -j/
        s/ -j\([^ ]\)/ -j -\1/
        s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- ./hotspot/make/bsd/makefiles/mapfile-vers-debug Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/bsd/makefiles/mapfile-vers-debug Wed Feb 04 12:14:39 2015 -0800
@@ -187,6 +187,9 @@
    _JVM_IsSupportedJNIVersion
    _JVM_IsThreadAlive
    _JVM_IsVMGeneratedMethodIx
+   _JVM_KnownToNotExist
+   _JVM_GetResourceLookupCacheURLs
+   _JVM_GetResourceLookupCache
    _JVM_LatestUserDefinedLoader
    _JVM_Listen
    _JVM_LoadClass0
--- ./hotspot/make/bsd/makefiles/mapfile-vers-product Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/bsd/makefiles/mapfile-vers-product Wed Feb 04 12:14:39 2015 -0800
@@ -187,6 +187,9 @@
    _JVM_IsSupportedJNIVersion
    _JVM_IsThreadAlive
    _JVM_IsVMGeneratedMethodIx
+   _JVM_KnownToNotExist
+   _JVM_GetResourceLookupCacheURLs
+   _JVM_GetResourceLookupCache
    _JVM_LatestUserDefinedLoader
    _JVM_Listen
    _JVM_LoadClass0
--- ./hotspot/make/bsd/makefiles/universal.gmk Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/bsd/makefiles/universal.gmk Wed Feb 04 12:14:39 2015 -0800
@@ -25,6 +25,8 @@
# macosx universal builds
universal_product:
    $(MAKE) MACOSX_UNIVERSAL=true all_product_universal
+universal_optimized:
+   $(MAKE) MACOSX_UNIVERSAL=true all_optimized_universal
universal_fastdebug:
    $(MAKE) MACOSX_UNIVERSAL=true all_fastdebug_universal
universal_debug:
@@ -36,6 +38,10 @@
#   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
    $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
    $(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
+all_optimized_universal:
+#  $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_OPTIMIZED_TARGETS)
+   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_OPTIMIZED_TARGETS)
+   $(QUIETLY) $(MAKE) BUILD_FLAVOR=optimized EXPORT_SUBDIR=/optimized universalize
all_fastdebug_universal:
#   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
    $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
@@ -98,13 +104,15 @@
export_product_jdk::
    $(MAKE) EXPORT_SUBDIR= export_universal
export_optimized_jdk::
-   $(MAKE) EXPORT_SUBDIR= export_universal
+   $(MAKE) EXPORT_SUBDIR=/optimized export_universal
export_fastdebug_jdk::
    $(MAKE) EXPORT_SUBDIR=/fastdebug export_universal
export_debug_jdk::
    $(MAKE) EXPORT_SUBDIR=/debug export_universal
copy_product_jdk::
    $(MAKE) COPY_SUBDIR= copy_universal
+copy_optimized_jdk::
+   $(MAKE) COPY_SUBDIR=/optimized copy_universal
copy_fastdebug_jdk::
    $(MAKE) COPY_SUBDIR=/fastdebug copy_universal
copy_debug_jdk::
@@ -112,5 +120,6 @@
.PHONY: universal_product universal_fastdebug universal_debug \
    all_product_universal all_fastdebug_universal all_debug_universal \
+   universal_optimized all_optimized_universal \
    universalize export_universal copy_universal \
    $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
--- ./hotspot/make/bsd/makefiles/vm.make Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/make/bsd/makefiles/vm.make Wed Feb 04 12:14:39 2015 -0800
@@ -243,10 +243,10 @@
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE)
vm.def +mapfile : $(MAPFILE) vm.def mapfile_ext rm -f $@ awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \ - { system ("cat vm.def"); } \ + { system ("cat mapfile_ext"); system ("cat vm.def"); } \ else \ { print $$0 } \ }' > $@ < $(MAPFILE) @@ -258,6 +258,13 @@ vm.def: $(Res_Files) $(Obj_Files) sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@ +mapfile_ext: + rm -f $@ + touch $@ + if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \ + cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \ + fi + STATIC_CXX = false ifeq ($(LINK_INTO),AOUT) --- ./hotspot/make/excludeSrc.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/excludeSrc.make Wed Feb 04 12:14:39 2015 -0800 @@ -21,6 +21,9 @@ # questions. # # + +include $(GAMMADIR)/make/altsrc.make + ifeq ($(INCLUDE_JVMTI), false) CXXFLAGS += -DINCLUDE_JVMTI=0 CFLAGS += -DINCLUDE_JVMTI=0 @@ -70,37 +73,49 @@ CXXFLAGS += -DINCLUDE_CDS=0 CFLAGS += -DINCLUDE_CDS=0 - Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp + Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \ + systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp endif ifeq ($(INCLUDE_ALL_GCS), false) CXXFLAGS += -DINCLUDE_ALL_GCS=0 CFLAGS += -DINCLUDE_ALL_GCS=0 - Src_Files_EXCLUDE += \ - cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \ - cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \ - concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \ - freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \ - collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \ - concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \ - g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \ - g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \ - g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \ - g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \ - g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \ - g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \ - ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \ - adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \ - cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \ - parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \ - psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \ - psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \ - psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \ - psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \ - parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \ - gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \ - mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp + gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation + gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation + gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew + gc_exclude := $(foreach gc,$(gc_subdirs), \ + $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \ + $(notdir $(wildcard 
$(gc_impl_alt)/$(gc)/*.cpp))) + Src_Files_EXCLUDE += $(gc_exclude) + + # Exclude everything in $(gc_impl)/shared except the files listed + # in $(gc_shared_keep). + gc_shared_all := $(notdir $(wildcard $(gc_impl)/shared/*.cpp)) + gc_shared_keep := \ + adaptiveSizePolicy.cpp \ + ageTable.cpp \ + collectorCounters.cpp \ + cSpaceCounters.cpp \ + gcId.cpp \ + gcPolicyCounters.cpp \ + gcStats.cpp \ + gcTimer.cpp \ + gcTrace.cpp \ + gcTraceSend.cpp \ + gcTraceTime.cpp \ + gcUtil.cpp \ + generationCounters.cpp \ + markSweep.cpp \ + objectCountEventSender.cpp \ + spaceDecorator.cpp \ + vmGCOperations.cpp + Src_Files_EXCLUDE += $(filter-out $(gc_shared_keep),$(gc_shared_all)) + + # src/share/vm/services + Src_Files_EXCLUDE += \ + g1MemoryPool.cpp \ + psMemoryPool.cpp endif ifeq ($(INCLUDE_NMT), false) @@ -108,8 +123,8 @@ CFLAGS += -DINCLUDE_NMT=0 Src_Files_EXCLUDE += \ - memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \ - memTracker.cpp nmtDCmd.cpp + memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \ + memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp endif -include $(HS_ALT_MAKE)/excludeSrc.make --- ./hotspot/make/hotspot_version Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/hotspot_version Wed Feb 04 12:14:39 2015 -0800 @@ -34,8 +34,8 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2015 HS_MAJOR_VER=25 -HS_MINOR_VER=31 -HS_BUILD_NUMBER=07 +HS_MINOR_VER=40 +HS_BUILD_NUMBER=25 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 --- ./hotspot/make/jprt.gmk Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/jprt.gmk Wed Feb 04 12:14:39 2015 -0800 @@ -42,6 +42,9 @@ jprt_build_fastdebugEmb: $(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug +jprt_build_optimizedEmb: + $(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_optimized + jprt_build_productOpen: $(MAKE) OPENJDK=true jprt_build_product @@ -51,6 +54,9 @@ jprt_build_fastdebugOpen: $(MAKE) OPENJDK=true jprt_build_fastdebug +jprt_build_optimizedOpen: + $(MAKE) OPENJDK=true jprt_build_optimized + jprt_build_product: all_product copy_product_jdk export_product_jdk ( $(CD) $(JDK_IMAGE_DIR) && \ $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . ) @@ -63,5 +69,9 @@ ( $(CD) $(JDK_IMAGE_DIR)/debug && \ $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . ) -.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug +jprt_build_optimized: all_optimized copy_optimized_jdk export_optimized_jdk + ( $(CD) $(JDK_IMAGE_DIR)/optimized && \ + $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . ) +.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug jprt_build_optimized + --- ./hotspot/make/jprt.properties Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,403 +0,0 @@ -# -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). 
-# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Properties for jprt - -# All build result bundles are full jdks. -jprt.need.sibling.build=false - -# At submit time, the release supplied will be in jprt.submit.release -# and will be one of the official release names defined in jprt. -# jprt supports property value expansion using ${property.name} syntax. - -# This tells jprt what default release we want to build - -jprt.hotspot.default.release=jdk8u20 - -jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}} - -# Disable syncing the source after builds and tests are done. - -jprt.sync.push=false - -# Note: we want both embedded releases and regular releases to build and test -# all platforms so that regressions are not introduced (eg. change to -# common code by SE breaks PPC/ARM; change to common code by SE-E breaks -# sparc etc. - -# Define the Solaris platforms we want for the various releases -jprt.my.solaris.sparcv9.jdk8u20=solaris_sparcv9_5.10 -jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10 -jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7} -jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}} - -jprt.my.solaris.x64.jdk8u20=solaris_x64_5.10 -jprt.my.solaris.x64.jdk7=solaris_x64_5.10 -jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7} -jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}} - -jprt.my.linux.i586.jdk8u20=linux_i586_2.6 -jprt.my.linux.i586.jdk7=linux_i586_2.6 -jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7} -jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}} - -jprt.my.linux.x64.jdk8u20=linux_x64_2.6 -jprt.my.linux.x64.jdk7=linux_x64_2.6 -jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7} -jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}} - -jprt.my.linux.ppc.jdk8u20=linux_ppc_2.6 -jprt.my.linux.ppc.jdk7=linux_ppc_2.6 -jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7} -jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}} - -jprt.my.linux.ppcv2.jdk8u20=linux_ppcv2_2.6 -jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6 -jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7} -jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}} - -jprt.my.linux.armvfpsflt.jdk8u20=linux_armvfpsflt_2.6 -jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}} - -jprt.my.linux.armvfphflt.jdk8u20=linux_armvfphflt_2.6 -jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}} - -# The ARM GP vfp-sflt build is not currently supported -#jprt.my.linux.armvs.jdk8u20=linux_armvs_2.6 -#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}} - -jprt.my.linux.armvh.jdk8u20=linux_armvh_2.6 -jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}} - -jprt.my.linux.armsflt.jdk8u20=linux_armsflt_2.6 -jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6 -jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7} -jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}} - -jprt.my.macosx.x64.jdk8u20=macosx_x64_10.7 -jprt.my.macosx.x64.jdk7=macosx_x64_10.7 
-jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7} -jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}} - -jprt.my.windows.i586.jdk8u20=windows_i586_6.1 -jprt.my.windows.i586.jdk7=windows_i586_6.1 -jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7} -jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}} - -jprt.my.windows.x64.jdk8u20=windows_x64_6.1 -jprt.my.windows.x64.jdk7=windows_x64_6.1 -jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7} -jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}} - -# Standard list of jprt build targets for this source tree - -jprt.build.targets.standard= \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \ - ${jprt.my.solaris.x64}-{product|fastdebug}, \ - ${jprt.my.linux.i586}-{product|fastdebug}, \ - ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \ - ${jprt.my.macosx.x64}-{product|fastdebug}, \ - ${jprt.my.windows.i586}-{product|fastdebug}, \ - ${jprt.my.windows.x64}-{product|fastdebug|optimized}, \ - ${jprt.my.linux.armvh}-{product|fastdebug} - -jprt.build.targets.open= \ - ${jprt.my.solaris.x64}-{debugOpen}, \ - ${jprt.my.linux.x64}-{productOpen} - -jprt.build.targets.embedded= \ - ${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \ - ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \ - ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \ - ${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \ - ${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \ - ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb} - -jprt.build.targets.all=${jprt.build.targets.standard}, \ - ${jprt.build.targets.embedded}, ${jprt.build.targets.open} - -jprt.build.targets.jdk8u20=${jprt.build.targets.all} -jprt.build.targets.jdk7=${jprt.build.targets.all} -jprt.build.targets.jdk7u8=${jprt.build.targets.all} -jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}} - -# Subset lists of test targets for this source tree - -jprt.my.solaris.sparcv9.test.targets= \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.solaris.sparcv9}-product-c2-runThese, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC - -jprt.my.solaris.x64.test.targets= \ - 
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.solaris.x64}-product-c2-runThese, \ - ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC - -jprt.my.linux.i586.test.targets = \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \ - ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \ - ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC - -jprt.my.linux.x64.test.targets = \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \ - 
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC - -jprt.my.macosx.x64.test.targets = \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC - -jprt.my.windows.i586.test.targets = \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \ - ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \ - ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \ - ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \ - ${jprt.my.windows.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \ - ${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \ - ${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \ - ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC - -jprt.my.windows.x64.test.targets = \ - 
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.windows.x64}-product-c2-runThese, \ - ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ - ${jprt.my.windows.x64}-product-c2-jbb_CMS, \ - ${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \ - ${jprt.my.windows.x64}-product-c2-jbb_G1, \ - ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC - -# Some basic "smoke" tests for OpenJDK builds -jprt.test.targets.open = \ - ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \ - ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98 - -# Testing for actual embedded builds is different to standard -jprt.my.linux.i586.test.targets.embedded = \ - linux_i586_2.6-product-c1-scimark - -# The complete list of test targets for jprt -# Note: no PPC or ARM tests at this stage - -jprt.test.targets.standard = \ - ${jprt.my.linux.i586.test.targets.embedded}, \ - ${jprt.my.solaris.sparcv9.test.targets}, \ - ${jprt.my.solaris.x64.test.targets}, \ - ${jprt.my.linux.i586.test.targets}, \ - ${jprt.my.linux.x64.test.targets}, \ - ${jprt.my.macosx.x64.test.targets}, \ - ${jprt.my.windows.i586.test.targets}, \ - ${jprt.my.windows.x64.test.targets}, \ - ${jprt.test.targets.open} - -jprt.test.targets.embedded= \ - ${jprt.my.linux.i586.test.targets.embedded}, \ - ${jprt.my.solaris.sparcv9.test.targets}, \ - ${jprt.my.solaris.x64.test.targets}, \ - ${jprt.my.linux.x64.test.targets}, \ - ${jprt.my.windows.i586.test.targets}, \ - ${jprt.my.windows.x64.test.targets} - -jprt.test.targets.jdk8u20=${jprt.test.targets.standard} -jprt.test.targets.jdk7=${jprt.test.targets.standard} -jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7} -jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}} - -# The default test/Makefile targets that should be run - -#jprt.make.rule.test.targets=*-product-*-packtest - -jprt.make.rule.test.targets.standard.client = \ - ${jprt.my.linux.i586}-*-c1-clienttest, \ - ${jprt.my.windows.i586}-*-c1-clienttest - -jprt.make.rule.test.targets.standard.server = \ - ${jprt.my.solaris.sparcv9}-*-c2-servertest, \ - ${jprt.my.solaris.x64}-*-c2-servertest, \ - ${jprt.my.linux.i586}-*-c2-servertest, \ - ${jprt.my.linux.x64}-*-c2-servertest, \ - ${jprt.my.macosx.x64}-*-c2-servertest, \ - ${jprt.my.windows.i586}-*-c2-servertest, \ - ${jprt.my.windows.x64}-*-c2-servertest - -jprt.make.rule.test.targets.standard.internalvmtests = \ - ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \ - ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \ - 
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \ - ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \ - ${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \ - ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \ - ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests - -jprt.make.rule.test.targets.standard.wbapi = \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \ - ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest - -jprt.make.rule.test.targets.standard = \ - ${jprt.make.rule.test.targets.standard.client}, \ - ${jprt.make.rule.test.targets.standard.server}, \ - ${jprt.make.rule.test.targets.standard.internalvmtests}, \ - ${jprt.make.rule.test.targets.standard.wbapi} - -jprt.make.rule.test.targets.embedded = \ - ${jprt.make.rule.test.targets.standard.client} - -jprt.make.rule.test.targets.jdk8u20=${jprt.make.rule.test.targets.standard} -jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard} -jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7} -jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}} - -# 7155453: Work-around to prevent popups on OSX from blocking test completion -# but the work-around is added to all platforms to be consistent -jprt.jbb.options=-Djava.awt.headless=true --- ./hotspot/make/linux/makefiles/adjust-mflags.sh Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/linux/makefiles/adjust-mflags.sh Wed Feb 04 12:14:39 2015 -0800 @@ -64,7 +64,7 @@ echo "$MFLAGS" \ | sed ' s/^-/ -/ - s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -\([^ I][^ I]*\)j/ -\1 -j/ s/ -j[0-9][0-9]*/ -j/ s/ -j\([^ ]\)/ -j -\1/ s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ --- ./hotspot/make/linux/makefiles/mapfile-vers-debug Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/linux/makefiles/mapfile-vers-debug Wed Feb 04 12:14:39 2015 -0800 @@ -217,6 +217,9 @@ JVM_RegisterSignal; JVM_ReleaseUTF; JVM_ResolveClass; + JVM_KnownToNotExist; + JVM_GetResourceLookupCacheURLs; + JVM_GetResourceLookupCache; JVM_ResumeThread; JVM_Send; JVM_SendTo; --- ./hotspot/make/linux/makefiles/mapfile-vers-product Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/linux/makefiles/mapfile-vers-product Wed Feb 04 12:14:39 2015 -0800 @@ -217,6 +217,9 @@ JVM_RegisterSignal; JVM_ReleaseUTF; JVM_ResolveClass; + JVM_KnownToNotExist; + JVM_GetResourceLookupCacheURLs; + JVM_GetResourceLookupCache; JVM_ResumeThread; JVM_Send; JVM_SendTo; --- ./hotspot/make/linux/makefiles/vm.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/linux/makefiles/vm.make Wed Feb 04 12:14:39 2015 -0800 @@ -233,10 +233,10 @@ vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) -mapfile : $(MAPFILE) vm.def +mapfile : $(MAPFILE) vm.def mapfile_ext rm -f $@ awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \ - { system ("cat vm.def"); } \ + { system ("cat mapfile_ext"); system ("cat vm.def"); } \ else \ { print $$0 } \ }' > $@ < $(MAPFILE) @@ -248,6 +248,13 @@ vm.def: $(Res_Files) $(Obj_Files) sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@ +mapfile_ext: + rm -f $@ + touch $@ + if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \ + cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > 
$@; \ + fi + ifeq ($(JVM_VARIANT_ZEROSHARK), true) STATIC_CXX = false else --- ./hotspot/make/solaris/Makefile Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/Makefile Wed Feb 04 12:14:39 2015 -0800 @@ -159,6 +159,7 @@ BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) BUILDTREE_VARS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE) +BUILDTREE_VARS += HS_ALT_MAKE=$(HS_ALT_MAKE) BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS) --- ./hotspot/make/solaris/makefiles/add_gnu_debuglink.make Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -# -# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Rules to build add_gnu_debuglink, used by vm.make on Solaris - -# Allow $(ADD_GNU_DEBUGLINK) to be called from any directory. -# We don't set or use the GENERATED macro to avoid affecting -# other HotSpot Makefiles. -TOPDIR = $(shell echo `pwd`) -ADD_GNU_DEBUGLINK = $(TOPDIR)/../generated/add_gnu_debuglink - -ADD_GNU_DEBUGLINK_DIR = $(GAMMADIR)/src/os/solaris/add_gnu_debuglink -ADD_GNU_DEBUGLINK_SRC = $(ADD_GNU_DEBUGLINK_DIR)/add_gnu_debuglink.c -ADD_GNU_DEBUGLINK_FLAGS = -LIBS_ADD_GNU_DEBUGLINK += -lelf - -ifeq ("${Platform_compiler}", "sparcWorks") -# Enable the following ADD_GNU_DEBUGLINK_FLAGS addition if you need to -# compare the built ELF objects. -# -# The -g option makes static data global and the "-W0,-noglobal" -# option tells the compiler to not globalize static data using a unique -# globalization prefix. Instead force the use of a static globalization -# prefix based on the source filepath so the objects from two identical -# compilations are the same. -# -# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't -# seem to work. I got "-W0,-noglobal" from Kelly and that works. 
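Both adjust-mflags.sh hunks in this change (Linux above, Solaris below) tighten the sed pattern that splits a combined -j out of MFLAGS, changing [^ ] to [^ I]. The apparent motivation, stated here as an inference since the patch carries no comment, is that newer GNU make versions pass -I<dir> in MFLAGS, and a directory name containing a j made the old pattern split inside the path. A rough std::regex rendition of the two patterns (ECMAScript regexes only approximate sed, and the MFLAGS value is invented):

// Sketch: contrast the old and new flag-splitting patterns from
// adjust-mflags.sh. std::regex is not sed, so this only approximates
// the behavior; the input MFLAGS string is made up for illustration.
#include <iostream>
#include <regex>
#include <string>

int main() {
  const std::string mflags = " -I/tmp/jdk/make -s";
  const std::regex oldpat(" -([^ ][^ ]*)j");    // before the fix
  const std::regex newpat(" -([^ I][^ I]*)j");  // after: skip -I options

  // Old pattern splits inside the -I path: " -I/tmp/ -jdk/make -s"
  std::cout << std::regex_replace(mflags, oldpat, " -$1 -j") << "\n";
  // New pattern leaves the -I option untouched: " -I/tmp/jdk/make -s"
  std::cout << std::regex_replace(mflags, newpat, " -$1 -j") << "\n";
}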
-#ADD_GNU_DEBUGLINK_FLAGS += -W0,-noglobal -endif # Platform_compiler == sparcWorks - -$(ADD_GNU_DEBUGLINK): $(ADD_GNU_DEBUGLINK_SRC) - $(CC) -g -o $@ $< $(ADD_GNU_DEBUGLINK_FLAGS) $(LIBS_ADD_GNU_DEBUGLINK) --- ./hotspot/make/solaris/makefiles/adjust-mflags.sh Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/adjust-mflags.sh Wed Feb 04 12:14:39 2015 -0800 @@ -64,7 +64,7 @@ echo "$MFLAGS" \ | sed ' s/^-/ -/ - s/ -\([^ ][^ ]*\)j/ -\1 -j/ + s/ -\([^ I][^ I]*\)j/ -\1 -j/ s/ -j[0-9][0-9]*/ -j/ s/ -j\([^ ]\)/ -j -\1/ s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/ --- ./hotspot/make/solaris/makefiles/buildtree.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/buildtree.make Wed Feb 04 12:14:39 2015 -0800 @@ -258,6 +258,8 @@ echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \ [ -n "$(ZIPEXE)" ] && \ echo && echo "ZIPEXE = $(ZIPEXE)"; \ + [ -n "$(HS_ALT_MAKE)" ] && \ + echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \ [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ --- ./hotspot/make/solaris/makefiles/defs.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/defs.make Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -133,6 +133,55 @@ OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY)) endif + ifneq ($(OBJCOPY),) + # OBJCOPY version check: + # - version number is last blank separate word on first line + # - version number formats that have been seen: + # - . + # - .. + # + # Full Debug Symbols on Solaris needs version 2.21.1 or newer. + # + OBJCOPY_VERS_CHK := $(shell \ + $(OBJCOPY) --version \ + | sed -n \ + -e 's/.* //' \ + -e '/^[01]\./b bad' \ + -e '/^2\./{' \ + -e ' s/^2\.//' \ + -e ' /^[0-9]$$/b bad' \ + -e ' /^[0-9]\./b bad' \ + -e ' /^1[0-9]$$/b bad' \ + -e ' /^1[0-9]\./b bad' \ + -e ' /^20\./b bad' \ + -e ' /^21\.0$$/b bad' \ + -e ' /^21\.0\./b bad' \ + -e '}' \ + -e ':good' \ + -e 's/.*/VALID_VERSION/p' \ + -e 'q' \ + -e ':bad' \ + -e 's/.*/BAD_VERSION/p' \ + -e 'q' \ + ) + ifeq ($(OBJCOPY_VERS_CHK),BAD_VERSION) + _JUNK_ := $(shell \ + echo >&2 "WARNING: $(OBJCOPY) --version info:"; \ + $(OBJCOPY) --version | sed -n -e 's/^/WARNING: /p' -e 'q' >&2; \ + echo >&2 "WARNING: an objcopy version of 2.21.1 or newer" \ + "is needed to create valid .debuginfo files."; \ + echo >&2 "WARNING: ignoring above objcopy command."; \ + echo >&2 "WARNING: patch 149063-01 or newer contains the" \ + "correct Solaris 10 SPARC version."; \ + echo >&2 "WARNING: patch 149064-01 or newer contains the" \ + "correct Solaris 10 X86 version."; \ + echo >&2 "WARNING: Solaris 11 Update 1 contains the" \ + "correct version."; \ + ) + OBJCOPY= + endif + endif + ifeq ($(OBJCOPY),) _JUNK_ := $(shell \ echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.") --- ./hotspot/make/solaris/makefiles/dtrace.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/dtrace.make Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -96,25 +96,16 @@ XLIBJVM_DTRACE_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO) XLIBJVM_DTRACE_DIZ = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ) -$(XLIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE) +$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE) @echo Making $@ $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \ $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \ $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set. -# Clear the SHF_ALLOC flag (if set) from empty section headers. -# An empty section header has sh_addr == 0 and sh_size == 0. -# This problem has only been seen on Solaris X64, but we call this tool -# on all Solaris builds just in case. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available. -# $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ; # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not # in the link name: - ( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ) + ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ) ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else @@ -131,20 +122,16 @@ endif endif -$(XLIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) +$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) @echo Making $@ $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \ $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# Clear the SHF_ALLOC flag (if set) from empty section headers. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ; # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not # in the link name: - ( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ) + ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ) ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else @@ -201,17 +188,13 @@ $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp $(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp -$(LIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE) +$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE) @echo Making $@ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \ $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# Clear the SHF_ALLOC flag (if set) from empty section headers. 
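The defs.make hunk above gates Full Debug Symbols on objcopy 2.21.1 or newer with a fairly dense sed program; once a known-good objcopy is guaranteed, the rules here and in the jsig, saproc, and vm makefiles below can drop the fix_empty_sec_hdr_flags and add_gnu_debuglink helper tools and call objcopy --add-gnu-debuglink directly. The gate itself is a plain three-component version comparison, sketched here (the sample version strings are invented):

// Sketch of the version gate implemented by the sed script in defs.make:
// accept objcopy >= 2.21.1, reject anything older. The sample strings
// below stand in for the last blank-separated word of the first line of
// `objcopy --version`.
#include <cstdio>
#include <tuple>

static bool objcopy_ok(const char* last_word) {
  int major = 0, minor = 0, micro = 0;
  std::sscanf(last_word, "%d.%d.%d", &major, &minor, &micro);
  return std::tie(major, minor, micro) >= std::make_tuple(2, 21, 1);
}

int main() {
  std::printf("%s\n", objcopy_ok("2.21.1") ? "VALID_VERSION" : "BAD_VERSION");
  std::printf("%s\n", objcopy_ok("2.15")   ? "VALID_VERSION" : "BAD_VERSION");
}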
- $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@ - $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $@ + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@ ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else @@ -226,17 +209,13 @@ endif endif -$(LIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) +$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE) @echo Making $@ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# Clear the SHF_ALLOC flag (if set) from empty section headers. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@ - $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $@ + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@ ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else --- ./hotspot/make/solaris/makefiles/fix_empty_sec_hdr_flags.make Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -# -# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Rules to build fix_empty_sec_hdr_flags, used by vm.make on Solaris - -# Allow $(FIX_EMPTY_SEC_HDR_FLAGS) to be called from any directory. -# We don't set or use the GENERATED macro to avoid affecting -# other HotSpot Makefiles. -TOPDIR = $(shell echo `pwd`) -FIX_EMPTY_SEC_HDR_FLAGS = $(TOPDIR)/../generated/fix_empty_sec_hdr_flags - -FIX_EMPTY_SEC_HDR_FLAGS_DIR = $(GAMMADIR)/src/os/solaris/fix_empty_sec_hdr_flags -FIX_EMPTY_SEC_HDR_FLAGS_SRC = $(FIX_EMPTY_SEC_HDR_FLAGS_DIR)/fix_empty_sec_hdr_flags.c -FIX_EMPTY_SEC_HDR_FLAGS_FLAGS = -LIBS_FIX_EMPTY_SEC_HDR_FLAGS += -lelf - -ifeq ("${Platform_compiler}", "sparcWorks") -# Enable the following FIX_EMPTY_SEC_HDR_FLAGS_FLAGS addition if you need to -# compare the built ELF objects. 
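For reference, the condition the deleted fix_empty_sec_hdr_flags tool corrected is easy to state: a section header with sh_addr == 0 and sh_size == 0 that still carries SHF_ALLOC, which older gobjcopy crashed on. A minimal libelf detector for that condition (illustration only, not the deleted tool's source; link with -lelf, error handling omitted):

// Sketch: report "empty" section headers (sh_addr == 0 && sh_size == 0)
// that still have SHF_ALLOC set, the case the removed workaround patched.
#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <unistd.h>
#include <cstdio>

int main(int argc, char** argv) {
  if (argc != 2) return 1;
  elf_version(EV_CURRENT);
  int fd = open(argv[1], O_RDONLY);
  Elf* e = elf_begin(fd, ELF_C_READ, NULL);
  Elf_Scn* scn = NULL;
  while ((scn = elf_nextscn(e, scn)) != NULL) {
    GElf_Shdr sh;
    gelf_getshdr(scn, &sh);
    if (sh.sh_addr == 0 && sh.sh_size == 0 && (sh.sh_flags & SHF_ALLOC)) {
      std::printf("empty SHF_ALLOC section header at index %zu\n",
                  (size_t)elf_ndxscn(scn));
    }
  }
  elf_end(e);
  close(fd);
  return 0;
}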
-# -# The -g option makes static data global and the "-W0,-noglobal" -# option tells the compiler to not globalize static data using a unique -# globalization prefix. Instead force the use of a static globalization -# prefix based on the source filepath so the objects from two identical -# compilations are the same. -# -# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't -# seem to work. I got "-W0,-noglobal" from Kelly and that works. -#FIX_EMPTY_SEC_HDR_FLAGS_FLAGS += -W0,-noglobal -endif # Platform_compiler == sparcWorks - -$(FIX_EMPTY_SEC_HDR_FLAGS): $(FIX_EMPTY_SEC_HDR_FLAGS_SRC) - $(CC) -g -o $@ $< $(FIX_EMPTY_SEC_HDR_FLAGS_FLAGS) $(LIBS_FIX_EMPTY_SEC_HDR_FLAGS) --- ./hotspot/make/solaris/makefiles/jsig.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/jsig.make Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -47,22 +47,13 @@ LFLAGS_JSIG += -mt -xnolib endif -$(LIBJSIG): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE) +$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE) @echo Making signal interposition lib... $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set. -# Clear the SHF_ALLOC flag (if set) from empty section headers. -# An empty section header has sh_addr == 0 and sh_size == 0. -# This problem has only been seen on Solaris X64, but we call this tool -# on all Solaris builds just in case. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available. -# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@ - $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJSIG_DEBUGINFO) $@ + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@ ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else --- ./hotspot/make/solaris/makefiles/mapfile-vers Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/mapfile-vers Wed Feb 04 12:14:39 2015 -0800 @@ -189,6 +189,9 @@ JVM_IsSupportedJNIVersion; JVM_IsThreadAlive; JVM_IsVMGeneratedMethodIx; + JVM_KnownToNotExist; + JVM_GetResourceLookupCacheURLs; + JVM_GetResourceLookupCache; JVM_LatestUserDefinedLoader; JVM_Listen; JVM_LoadClass0; --- ./hotspot/make/solaris/makefiles/saproc.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/saproc.make Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -90,7 +90,7 @@ #SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER -$(LIBSAPROC): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE) +$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE) $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ exit 1; \ @@ -121,17 +121,8 @@ -c -o $(SADISOBJ) ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set. -# Clear the SHF_ALLOC flag (if set) from empty section headers. -# An empty section header has sh_addr == 0 and sh_size == 0. -# This problem has only been seen on Solaris X64, but we call this tool -# on all Solaris builds just in case. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available. -# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@ - $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBSAPROC_DEBUGINFO) $@ + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@ ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else --- ./hotspot/make/solaris/makefiles/vm.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/solaris/makefiles/vm.make Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -153,14 +153,6 @@ include $(MAKEFILES_DIR)/dtrace.make #---------------------------------------------------------------------- -# add_gnu_debuglink tool -include $(MAKEFILES_DIR)/add_gnu_debuglink.make - -#---------------------------------------------------------------------- -# fix_empty_sec_hdr_flags tool -include $(MAKEFILES_DIR)/fix_empty_sec_hdr_flags.make - -#---------------------------------------------------------------------- # JVM JVM = jvm @@ -247,11 +239,12 @@ vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) -mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def +mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext rm -f $@ cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \ | $(NAWK) '{ \ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") { \ + system ("cat mapfile_ext"); \ system ("cat vm.def"); \ } else { \ print $$0; \ @@ -265,6 +258,13 @@ vm.def: $(Obj_Files) sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@ +mapfile_ext: + rm -f $@ + touch $@ + if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \ + cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \ + fi + ifeq ($(LINK_INTO),AOUT) LIBJVM.o = LIBJVM_MAPFILE = @@ -291,7 +291,7 @@ LINK_VM = $(LINK_LIB.CXX) endif # making the library: -$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) +$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),) @echo Linking vm... $(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK) @@ -299,17 +299,8 @@ $(QUIETLY) $(LINK_LIB.CXX/POST_HOOK) $(QUIETLY) rm -f $@.1 && ln -s $@ $@.1 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) -# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set. -# Clear the SHF_ALLOC flag (if set) from empty section headers. 
-# An empty section header has sh_addr == 0 and sh_size == 0. -# This problem has only been seen on Solaris X64, but we call this tool -# on all Solaris builds just in case. - $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@ $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO) -# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections. -# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available. -# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@ - $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DEBUGINFO) $@ + $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@ ifeq ($(STRIP_POLICY),all_strip) $(QUIETLY) $(STRIP) $@ else --- ./hotspot/make/windows/makefiles/compile.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/windows/makefiles/compile.make Wed Feb 04 12:14:39 2015 -0800 @@ -268,7 +268,7 @@ !endif LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \ comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \ - uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \ + uuid.lib Wsock32.lib winmm.lib version.lib /nologo /machine:$(MACHINE) /opt:REF \ /opt:ICF,8 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1" LD_FLAGS= $(LD_FLAGS) /map /debug --- ./hotspot/make/windows/makefiles/projectcreator.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/windows/makefiles/projectcreator.make Wed Feb 04 12:14:39 2015 -0800 @@ -72,6 +72,7 @@ -ignorePath arm \ -ignorePath ppc \ -ignorePath zero \ + -ignorePath aix \ -hidePath .hg --- ./hotspot/make/windows/makefiles/sa.make Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/make/windows/makefiles/sa.make Wed Feb 04 12:14:39 2015 -0800 @@ -111,7 +111,7 @@ SA_LFLAGS = $(SA_LFLAGS) -map -debug !endif !if "$(BUILDARCH)" == "i486" -SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS) +SA_LFLAGS = /SAFESEH $(SA_LFLAGS) !endif SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG) --- ./hotspot/src/cpu/ppc/vm/assembler_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/assembler_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -693,7 +693,7 @@ // PPC 1, section 4.6.7 Floating-Point Compare Instructions fcmpu( CCR7, F24, F25); - tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end()); + tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end())); code()->decode(); } --- ./hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -268,8 +268,35 @@ ISEL_OPCODE = (31u << OPCODE_SHIFT | 15u << 1), - MTLR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT), - MFLR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT), + // Special purpose registers + MTSPR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1), + MFSPR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1), + + MTXER_OPCODE = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT), + MFXER_OPCODE = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT), + + MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT), + MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT), + + MTLR_OPCODE = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT), + MFLR_OPCODE = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT), + + MTCTR_OPCODE = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT), + MFCTR_OPCODE = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT), + + MTTFHAR_OPCODE = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT), + MFTFHAR_OPCODE = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT), + MTTFIAR_OPCODE = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT), + MFTFIAR_OPCODE = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT), + MTTEXASR_OPCODE = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT), + MFTEXASR_OPCODE = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT), + MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT), + MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT), + + MTVRSAVE_OPCODE = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT), + MFVRSAVE_OPCODE = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT), + + MFTB_OPCODE = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT), MTCRF_OPCODE = (31u << OPCODE_SHIFT | 144u << 1), MFCR_OPCODE = (31u << OPCODE_SHIFT | 19u << 1), @@ -291,13 +318,11 @@ // CTR-related opcodes BCCTR_OPCODE = (19u << OPCODE_SHIFT | 528u << 1), - MTCTR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT), - MFCTR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT), - LWZ_OPCODE = (32u << OPCODE_SHIFT), LWZX_OPCODE = (31u << OPCODE_SHIFT | 23u << 1), LWZU_OPCODE = (33u << OPCODE_SHIFT), + LWBRX_OPCODE = (31u << OPCODE_SHIFT | 534 << 1), LHA_OPCODE = (42u << OPCODE_SHIFT), LHAX_OPCODE = (31u << OPCODE_SHIFT | 343u << 1), @@ -306,6 +331,7 @@ LHZ_OPCODE = (40u << OPCODE_SHIFT), LHZX_OPCODE = (31u << OPCODE_SHIFT | 279u << 1), LHZU_OPCODE = (41u << OPCODE_SHIFT), + LHBRX_OPCODE = (31u << OPCODE_SHIFT | 790 << 1), LBZ_OPCODE = (34u << OPCODE_SHIFT), LBZX_OPCODE = (31u << OPCODE_SHIFT | 87u << 1), @@ -583,6 +609,37 @@ MTVSCR_OPCODE = (4u << OPCODE_SHIFT | 1604u ), MFVSCR_OPCODE = (4u << OPCODE_SHIFT | 1540u ), + // AES (introduced with Power 8) + VCIPHER_OPCODE = (4u << OPCODE_SHIFT | 1288u), + VCIPHERLAST_OPCODE = (4u << OPCODE_SHIFT | 1289u), + VNCIPHER_OPCODE = (4u << OPCODE_SHIFT | 1352u), + VNCIPHERLAST_OPCODE = (4u << OPCODE_SHIFT | 1353u), + VSBOX_OPCODE = (4u << OPCODE_SHIFT | 1480u), + + // SHA (introduced with Power 8) + VSHASIGMAD_OPCODE = (4u << OPCODE_SHIFT | 1730u), + VSHASIGMAW_OPCODE = (4u << OPCODE_SHIFT | 1666u), + + // Vector Binary Polynomial Multiplication (introduced with Power 8) + VPMSUMB_OPCODE = (4u << OPCODE_SHIFT | 1032u), + VPMSUMD_OPCODE = (4u << OPCODE_SHIFT | 1224u), + VPMSUMH_OPCODE = (4u << 
OPCODE_SHIFT | 1096u), + VPMSUMW_OPCODE = (4u << OPCODE_SHIFT | 1160u), + + // Vector Permute and Xor (introduced with Power 8) + VPERMXOR_OPCODE = (4u << OPCODE_SHIFT | 45u), + + // Transactional Memory instructions (introduced with Power 8) + TBEGIN_OPCODE = (31u << OPCODE_SHIFT | 654u << 1), + TEND_OPCODE = (31u << OPCODE_SHIFT | 686u << 1), + TABORT_OPCODE = (31u << OPCODE_SHIFT | 910u << 1), + TABORTWC_OPCODE = (31u << OPCODE_SHIFT | 782u << 1), + TABORTWCI_OPCODE = (31u << OPCODE_SHIFT | 846u << 1), + TABORTDC_OPCODE = (31u << OPCODE_SHIFT | 814u << 1), + TABORTDCI_OPCODE = (31u << OPCODE_SHIFT | 878u << 1), + TSR_OPCODE = (31u << OPCODE_SHIFT | 750u << 1), + TCHECK_OPCODE = (31u << OPCODE_SHIFT | 718u << 1), + // Icache and dcache related instructions DCBA_OPCODE = (31u << OPCODE_SHIFT | 758u << 1), DCBZ_OPCODE = (31u << OPCODE_SHIFT | 1014u << 1), @@ -1364,11 +1421,17 @@ inline void lwax( Register d, Register s1, Register s2); inline void lwa( Register d, int si16, Register s1); + // 4 bytes reversed + inline void lwbrx( Register d, Register s1, Register s2); + // 2 bytes inline void lhzx( Register d, Register s1, Register s2); inline void lhz( Register d, int si16, Register s1); inline void lhzu( Register d, int si16, Register s1); + // 2 bytes reversed + inline void lhbrx( Register d, Register s1, Register s2); + // 2 bytes inline void lhax( Register d, Register s1, Register s2); inline void lha( Register d, int si16, Register s1); @@ -1412,6 +1475,25 @@ inline void mcrf( ConditionRegister crd, ConditionRegister cra); inline void mtcr( Register s); + // Special purpose registers + // Exception Register + inline void mtxer(Register s1); + inline void mfxer(Register d); + // Vector Register Save Register + inline void mtvrsave(Register s1); + inline void mfvrsave(Register d); + // Timebase + inline void mftb(Register d); + // Introduced with Power 8: + // Data Stream Control Register + inline void mtdscr(Register s1); + inline void mfdscr(Register d ); + // Transactional Memory Registers + inline void mftfhar(Register d); + inline void mftfiar(Register d); + inline void mftexasr(Register d); + inline void mftexasru(Register d); + // PPC 1, section 2.4.1 Branch Instructions inline void b( address a, relocInfo::relocType rt = relocInfo::none); inline void b( Label& L); @@ -1852,16 +1934,51 @@ inline void mtvscr( VectorRegister b); inline void mfvscr( VectorRegister d); + // AES (introduced with Power 8) + inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vncipher( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b); + inline void vsbox( VectorRegister d, VectorRegister a); + + // SHA (introduced with Power 8) + // Not yet implemented. 
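The new lwbrx and lhbrx loads declared above exist to read Java's big-endian class-file and bytecode data efficiently on little-endian PPC: they fold the byte swap into the load itself. The VM_LITTLE_ENDIAN paths added to interp_masm_ppc_64.cpp later in this patch depend on exactly this effect, sketched portably below (GCC/Clang builtins assumed):

// Illustration: the effect of lwbrx on a little-endian target, written
// portably. Java bytecode operands are big-endian, so a little-endian
// CPU must swap the loaded bytes; lwbrx does that swap in one instruction.
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t load_java_u4(const uint8_t* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);   // native-endian load
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  v = __builtin_bswap32(v);       // what lwbrx folds into the load
#endif
  return v;
}

int main() {
  const uint8_t bytecode[4] = {0x12, 0x34, 0x56, 0x78};  // big-endian 0x12345678
  assert(load_java_u4(bytecode) == 0x12345678u);
  return 0;
}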
+ + // Vector Binary Polynomial Multiplication (introduced with Power 8) + inline void vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vpmsumd( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vpmsumh( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vpmsumw( VectorRegister d, VectorRegister a, VectorRegister b); + + // Vector Permute and Xor (introduced with Power 8) + inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c); + + // Transactional Memory instructions (introduced with Power 8) + inline void tbegin_(); // R=0 + inline void tbeginrot_(); // R=1 Rollback-Only Transaction + inline void tend_(); // A=0 + inline void tendall_(); // A=1 + inline void tabort_(Register a); + inline void tabortwc_(int t, Register a, Register b); + inline void tabortwci_(int t, Register a, int si); + inline void tabortdc_(int t, Register a, Register b); + inline void tabortdci_(int t, Register a, int si); + inline void tsuspend_(); // tsr with L=0 + inline void tresume_(); // tsr with L=1 + inline void tcheck(int f); + // The following encoders use r0 as second operand. These instructions // read r0 as '0'. inline void lwzx( Register d, Register s2); inline void lwz( Register d, int si16); inline void lwax( Register d, Register s2); inline void lwa( Register d, int si16); + inline void lwbrx(Register d, Register s2); inline void lhzx( Register d, Register s2); inline void lhz( Register d, int si16); inline void lhax( Register d, Register s2); inline void lha( Register d, int si16); + inline void lhbrx(Register d, Register s2); inline void lbzx( Register d, Register s2); inline void lbz( Register d, int si16); inline void ldx( Register d, Register s2); --- ./hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -263,10 +263,14 @@ inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));} inline void Assembler::lwa( Register d, int si16, Register s1) { emit_int32(LWA_OPCODE | rt(d) | ds(si16) | ra0mem(s1));} +inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));} + inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));} inline void Assembler::lhz( Register d, int si16, Register s1) { emit_int32(LHZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));} inline void Assembler::lhzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));} +inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));} + inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));} inline void Assembler::lha( Register d, int si16, Register s1) { emit_int32(LHA_OPCODE | rt(d) | d1(si16) | ra0mem(s1));} inline void Assembler::lhau( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));} @@ -308,6 +312,25 @@ { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); } inline void Assembler::mtcr( Register s) { Assembler::mtcrf(0xff, s); } +// Special purpose registers +// Exception Register +inline void Assembler::mtxer(Register s1) { 
emit_int32(MTXER_OPCODE | rs(s1)); } +inline void Assembler::mfxer(Register d ) { emit_int32(MFXER_OPCODE | rt(d)); } +// Vector Register Save Register +inline void Assembler::mtvrsave(Register s1) { emit_int32(MTVRSAVE_OPCODE | rs(s1)); } +inline void Assembler::mfvrsave(Register d ) { emit_int32(MFVRSAVE_OPCODE | rt(d)); } +// Timebase +inline void Assembler::mftb(Register d ) { emit_int32(MFTB_OPCODE | rt(d)); } +// Introduced with Power 8: +// Data Stream Control Register +inline void Assembler::mtdscr(Register s1) { emit_int32(MTDSCR_OPCODE | rs(s1)); } +inline void Assembler::mfdscr(Register d ) { emit_int32(MFDSCR_OPCODE | rt(d)); } +// Transactional Memory Registers +inline void Assembler::mftfhar(Register d ) { emit_int32(MFTFHAR_OPCODE | rt(d)); } +inline void Assembler::mftfiar(Register d ) { emit_int32(MFTFIAR_OPCODE | rt(d)); } +inline void Assembler::mftexasr(Register d ) { emit_int32(MFTEXASR_OPCODE | rt(d)); } +inline void Assembler::mftexasru(Register d ) { emit_int32(MFTEXASRU_OPCODE | rt(d)); } + // SAP JVM 2006-02-13 PPC branch instruction. // PPC 1, section 2.4.1 Branch Instructions inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); } @@ -731,15 +754,50 @@ inline void Assembler::mtvscr( VectorRegister b) { emit_int32( MTVSCR_OPCODE | vrb(b)); } inline void Assembler::mfvscr( VectorRegister d) { emit_int32( MFVSCR_OPCODE | vrt(d)); } +// AES (introduced with Power 8) +inline void Assembler::vcipher( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vncipher( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vsbox( VectorRegister d, VectorRegister a) { emit_int32( VSBOX_OPCODE | vrt(d) | vra(a) ); } + +// SHA (introduced with Power 8) +// Not yet implemented. 
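All of these inline encoders follow one pattern: OR the precomputed opcode constant with the shifted register fields and emit a single 32-bit word. A worked example of the arithmetic for lwbrx, assuming the usual PPC X-form layout (primary opcode in the top six bits, so OPCODE_SHIFT is 26, with RT, RA, RB at shifts 21, 16, and 11; these shifts are assumptions matching the standard architecture manual, not copied from assembler_ppc.hpp):

// Worked encoding example for lwbrx r3, r4, r5 (X-form, XO = 534).
#include <cstdint>
#include <cstdio>

enum { OPCODE_SHIFT = 26 };  // primary opcode occupies the top 6 bits

int main() {
  const uint32_t LWBRX_OPCODE = (31u << OPCODE_SHIFT | 534u << 1);
  // rt = 3, ra = 4, rb = 5 at shifts 21 / 16 / 11:
  uint32_t insn = LWBRX_OPCODE | 3u << 21 | 4u << 16 | 5u << 11;
  std::printf("0x%08x\n", insn);  // should print 0x7c642c2c
}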
+ +// Vector Binary Polynomial Multiplication (introduced with Power 8) +inline void Assembler::vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vpmsumd( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vpmsumh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vpmsumw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); } + +// Vector Permute and Xor (introduced with Power 8) +inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); } + +// Transactional Memory instructions (introduced with Power 8) +inline void Assembler::tbegin_() { emit_int32( TBEGIN_OPCODE | rc(1)); } +inline void Assembler::tbeginrot_() { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); } +inline void Assembler::tend_() { emit_int32( TEND_OPCODE | rc(1)); } +inline void Assembler::tendall_() { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); } +inline void Assembler::tabort_(Register a) { emit_int32( TABORT_OPCODE | ra(a) | rc(1)); } +inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); } +inline void Assembler::tabortwci_(int t, Register a, int si) { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); } +inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); } +inline void Assembler::tabortdci_(int t, Register a, int si) { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); } +inline void Assembler::tsuspend_() { emit_int32( TSR_OPCODE | rc(1)); } +inline void Assembler::tresume_() { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); } +inline void Assembler::tcheck(int f) { emit_int32( TCHECK_OPCODE | bf(f)); } + // ra0 version inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));} inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));} inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));} inline void Assembler::lwa( Register d, int si16 ) { emit_int32( LWA_OPCODE | rt(d) | ds(si16));} +inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));} inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));} inline void Assembler::lhz( Register d, int si16 ) { emit_int32( LHZ_OPCODE | rt(d) | d1(si16));} inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));} inline void Assembler::lha( Register d, int si16 ) { emit_int32( LHA_OPCODE | rt(d) | d1(si16));} +inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));} inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));} inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));} inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));} --- ./hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ 
./hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,34 +50,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- // A PPC CompiledStaticCall looks like this: @@ -203,7 +175,7 @@ if (TraceICs) { ResourceMark rm; tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), + p2i(instruction_address()), callee->name_and_sig_as_C_string()); } --- ./hotspot/src/cpu/ppc/vm/frame_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/frame_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -140,7 +140,7 @@ void frame::patch_pc(Thread* thread, address pc) { if (TracePcPatching) { tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]", - &((address*) _sp)[-1], ((address*) _sp)[-1], pc); + p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc)); } own_abi()->lr = (uint64_t)pc; _cb = CodeCache::find_blob(pc); --- ./hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -37,6 +37,8 @@ // signatures accordingly. const bool CCallingConventionRequiresIntsAsLongs = true; +#define SUPPORTS_NATIVE_CX8 + // The PPC CPUs are NOT multiple-copy-atomic. 
#define CPU_NOT_MULTIPLE_COPY_ATOMIC --- ./hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,7 +25,6 @@ #include "precompiled.hpp" -#include "asm/assembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "interp_masm_ppc_64.hpp" #include "interpreter/interpreterRuntime.hpp" @@ -119,9 +118,15 @@ // Call the Interpreter::remove_activation_preserving_args_entry() // func to get the address of the same-named entrypoint in the // generated interpreter code. +#if defined(ABI_ELFv2) + call_c(CAST_FROM_FN_PTR(address, + Interpreter::remove_activation_preserving_args_entry), + relocInfo::none); +#else call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, Interpreter::remove_activation_preserving_args_entry), relocInfo::none); +#endif // Jump to Interpreter::_remove_activation_preserving_args_entry. mtctr(R3_RET); @@ -331,29 +336,40 @@ void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int bcp_offset, Register Rdst, signedOrNot is_signed) { +#if defined(VM_LITTLE_ENDIAN) + if (bcp_offset) { + load_const_optimized(Rdst, bcp_offset); + lhbrx(Rdst, R14_bcp, Rdst); + } else { + lhbrx(Rdst, R14_bcp); + } + if (is_signed == Signed) { + extsh(Rdst, Rdst); + } +#else // Read Java big endian format. if (is_signed == Signed) { lha(Rdst, bcp_offset, R14_bcp); } else { lhz(Rdst, bcp_offset, R14_bcp); } -#if 0 - assert(Rtmp != Rdst, "need separate temp register"); - Register Rfirst = Rtmp; - lbz(Rfirst, bcp_offset, R14_bcp); // first byte - lbz(Rdst, bcp_offset+1, R14_bcp); // second byte - - // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00) - rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48); - if (is_signed == Signed) { - extsh(Rdst, Rdst); - } #endif } void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int bcp_offset, Register Rdst, signedOrNot is_signed) { +#if defined(VM_LITTLE_ENDIAN) + if (bcp_offset) { + load_const_optimized(Rdst, bcp_offset); + lwbrx(Rdst, R14_bcp, Rdst); + } else { + lwbrx(Rdst, R14_bcp); + } + if (is_signed == Signed) { + extsw(Rdst, Rdst); + } +#else // Read Java big endian format. if (bcp_offset & 3) { // Offset unaligned? load_const_optimized(Rdst, bcp_offset); @@ -369,19 +385,27 @@ lwz(Rdst, bcp_offset, R14_bcp); } } +#endif } + // Load the constant pool cache index from the bytecode stream. // // Kills / writes: // - Rdst, Rscratch void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) { assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); + // Cache index is always in the native format, courtesy of Rewriter. if (index_size == sizeof(u2)) { - get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned); + lhz(Rdst, bcp_offset, R14_bcp); } else if (index_size == sizeof(u4)) { assert(EnableInvokeDynamic, "giant index used only for JSR 292"); - get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed); + if (bcp_offset & 3) { + load_const_optimized(Rdst, bcp_offset); + lwax(Rdst, R14_bcp, Rdst); + } else { + lwa(Rdst, bcp_offset, R14_bcp); + } assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); nand(Rdst, Rdst, Rdst); // convert to plain index } else if (index_size == sizeof(u1)) { @@ -398,6 +422,29 @@ add(cache, R27_constPoolCache, cache); } +// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format) +// from (Rsrc)+offset. 
+void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
+                                       signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (offset) {
+    load_const_optimized(Rdst, offset);
+    lwbrx(Rdst, Rdst, Rsrc);
+  } else {
+    lwbrx(Rdst, Rsrc);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
+  if (is_signed == Signed) {
+    lwa(Rdst, offset, Rsrc);
+  } else {
+    lwz(Rdst, offset, Rsrc);
+  }
+#endif
+}
+
 // Load object from cpool->resolved_references(index).
 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
   assert_different_registers(result, index);
@@ -498,6 +545,9 @@
   cmplw(CCR0, Rindex, Rlength);
   sldi(RsxtIndex, RsxtIndex, index_shift);
   blt(CCR0, LnotOOR);
+  // Index should be in R17_tos, array should be in R4_ARG2.
+  mr(R17_tos, Rindex);
+  mr(R4_ARG2, Rarray);
   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   mtctr(Rtmp);
   bctr();
@@ -1632,6 +1682,228 @@
   }
 }

+// Argument and return type profiling.
+// kills: tmp, tmp2, R0, CR0, CR1
+void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
+                                                 RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
+  Label do_nothing, do_update;
+
+  // tmp2 = obj is allowed
+  assert_different_registers(obj, mdo_addr_base, tmp, R0);
+  assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
+  const Register klass = tmp2;
+
+  verify_oop(obj);
+
+  ld(tmp, mdo_addr_offs, mdo_addr_base);
+
+  // Set null_seen if obj is 0.
+  cmpdi(CCR0, obj, 0);
+  ori(R0, tmp, TypeEntries::null_seen);
+  beq(CCR0, do_update);
+
+  load_klass(klass, obj);
+
+  clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
+  // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
+  cmpd(CCR1, R0, klass);
+  // Klass seen before, nothing to do (regardless of unknown bit).
+  //beq(CCR1, do_nothing);
+
+  andi_(R0, klass, TypeEntries::type_unknown);
+  // Already unknown. Nothing to do anymore.
+  //bne(CCR0, do_nothing);
+  crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
+  beq(CCR0, do_nothing);
+
+  clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
+  orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
+  beq(CCR0, do_update); // First time here. Set profile type.
+
+  // Different than before. Cannot keep accurate profile.
+  ori(R0, tmp, TypeEntries::type_unknown);
+
+  bind(do_update);
+  // update profile
+  std(R0, mdo_addr_offs, mdo_addr_base);
+
+  align(32, 12);
+  bind(do_nothing);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  assert_different_registers(callee, tmp1, tmp2, R28_mdx);
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
+    cmpwi(CCR0, tmp1, is_virtual ?
+          DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    bne(CCR0, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(R28_mdx, off_to_args, R28_mdx);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile.
+          ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+          cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
+          addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
+          blt(CCR0, done);
+        }
+        ld(tmp1, in_bytes(Method::const_offset()), callee);
+        lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
+        // Stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
+        subf(tmp1, tmp2, tmp1);
+
+        sldi(tmp1, tmp1, Interpreter::logStackElementSize);
+        ldx(tmp1, tmp1, R15_esp);
+
+        profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addi(R28_mdx, R28_mdx, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+        addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
+        add(R28_mdx, tmp1, R28_mdx);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
+    }
+
+    // Mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
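The guard emitted next accepts only the JSR 292 call shapes. As plain C++ over the HotSpot enums it amounts to this (an illustrative sketch, not code from the patch; the two cror instructions below fold the three comparisons into a single CR0 bit):

  // Return-type profiling in jsr292-only mode applies only at these sites.
  static bool should_profile_return_at(Bytecodes::Code bc, vmIntrinsics::ID id) {
    return bc == Bytecodes::_invokedynamic
        || bc == Bytecodes::_invokehandle
        || id == vmIntrinsics::_compiledLambdaForm;
  }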
+      lbz(tmp1, 0, R14_bcp);
+      lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
+      cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
+      cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      bne(CCR0, profile_continue);
+    }
+
+    profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters.
+    lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
+    cmpwi(CCR0, tmp1, 0);
+    blt(CCR0, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameter. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+
+    // Pointer to the parameter area in the MDO.
+    const Register mdp = tmp1;
+    add(mdp, tmp1, R28_mdx);
+
+    // Offset of the current profile entry to update.
+    const Register entry_offset = tmp2;
+    // entry_offset = array len in number of cells
+    ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
+
+    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
+    addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
+    // entry_offset in bytes
+    sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
+
+    Label loop;
+    align(32, 12);
+    bind(loop);
+
+    // Load offset on the stack from the slot for this parameter.
+    ld(tmp3, entry_offset, mdp);
+    sldi(tmp3, tmp3, Interpreter::logStackElementSize);
+    neg(tmp3, tmp3);
+    // Read the parameter from the local area.
+    ldx(tmp3, tmp3, R18_locals);
+
+    // Make entry_offset now point to the type field for this parameter.
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    assert(type_base > off_base, "unexpected");
+    addi(entry_offset, entry_offset, type_base - off_base);
+
+    // Profile the parameter.
+    profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
+
+    // Go to next parameter.
+    int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
+    cmpdi(CCR0, entry_offset, off_base + delta);
+    addi(entry_offset, entry_offset, -delta);
+    bge(CCR0, loop);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
 // Add a InterpMonitorElem to stack (see frame_sparc.hpp).
 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty,
                                                      Register Rtemp1, Register Rtemp2) {
@@ -1993,20 +2265,19 @@
     bne(CCR0, test);
     address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-    unsigned int nbytes_save = 10*8; // 10 volatile gprs
-
-    save_LR_CR(Rtmp);
+    const int nbytes_save = 11*8; // volatile gprs except R0
+    save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+    save_LR_CR(Rtmp); // Save in old frame.
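The save area grows from 10*8 to 11*8 bytes because save_volatile_gprs now also spills R2, as the macroAssembler_ppc.cpp hunk further down adds. The area sits at negative offsets from the old SP, which is why the spill now happens before push_frame_reg_args and the matching restore after pop_frame. The arithmetic, spelled out (illustrative only):

  // Volatile integer registers spilled here are R2..R12; R0 is excluded
  // (it serves as scratch) and R1 is the stack pointer.
  const int saved_gprs  = 12 - 2 + 1;      // R2 through R12 = 11 registers
  const int nbytes_save = saved_gprs * 8;  // 11*8 = 88 bytes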
push_frame_reg_args(nbytes_save, Rtmp); - save_volatile_gprs(R1_SP, 112); // except R0 load_const_optimized(Rtmp, fd, R0); mr_if_needed(R4_ARG2, reg); mr(R3_ARG1, R19_method); call_c(Rtmp); // call C - restore_volatile_gprs(R1_SP, 112); // except R0 pop_frame(); restore_LR_CR(Rtmp); + restore_volatile_gprs(R1_SP, -nbytes_save); // except R0 b(skip); // Perform a more elaborate out-of-line call. --- ./hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -130,6 +130,7 @@ void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2)); + void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed); // common code @@ -254,6 +255,12 @@ void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call); void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call); + // Argument and return type profiling. + void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2); + void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual); + void profile_return_type(Register ret, Register tmp1, Register tmp2); + void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4); + #endif // !CC_INTERP // Debugging --- ./hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,7 +24,6 @@ */ #include "precompiled.hpp" -#include "asm/assembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "interpreter/bytecodeHistogram.hpp" #include "interpreter/interpreter.hpp" --- ./hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -806,6 +806,7 @@ // For verify_oops. void MacroAssembler::save_volatile_gprs(Register dst, int offset) { + std(R2, offset, dst); offset += 8; std(R3, offset, dst); offset += 8; std(R4, offset, dst); offset += 8; std(R5, offset, dst); offset += 8; @@ -820,6 +821,7 @@ // For verify_oops. void MacroAssembler::restore_volatile_gprs(Register src, int offset) { + ld(R2, offset, src); offset += 8; ld(R3, offset, src); offset += 8; ld(R4, offset, src); offset += 8; ld(R5, offset, src); offset += 8; @@ -1186,6 +1188,16 @@ call_VM(oop_result, entry_point, check_exceptions); } +void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, + bool check_exceptions) { + // R3_ARG1 is reserved for the thread + mr_if_needed(R4_ARG2, arg_1); + assert(arg_2 != R4_ARG2, "smashed argument"); + mr_if_needed(R5_ARG3, arg_2); + mr_if_needed(R6_ARG4, arg_3); + call_VM(oop_result, entry_point, check_exceptions); +} + void MacroAssembler::call_VM_leaf(address entry_point) { call_VM_leaf_base(entry_point); } @@ -2365,7 +2377,7 @@ #endif // INCLUDE_ALL_GCS // Values for last_Java_pc, and last_Java_sp must comply to the rules -// in frame_ppc64.hpp. 
+// in frame_ppc.hpp. void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) { // Always set last_Java_pc and flags first because once last_Java_sp // is visible has_last_Java_frame is true and users will look at the @@ -2492,6 +2504,7 @@ } void MacroAssembler::decode_klass_not_null(Register dst, Register src) { + assert(dst != R0, "Dst reg may not be R0, as R0 is used here."); if (src == noreg) src = dst; Register shifted_src = src; if (Universe::narrow_klass_shift() != 0 || @@ -2526,14 +2539,11 @@ void MacroAssembler::reinit_heapbase(Register d, Register tmp) { if (Universe::heap() != NULL) { - if (Universe::narrow_oop_base() == NULL) { - Assembler::xorr(R30, R30, R30); - } else { - load_const(R30, Universe::narrow_ptrs_base(), tmp); - } + load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp); } else { - load_const(R30, Universe::narrow_ptrs_base_addr(), tmp); - ld(R30, 0, R30); + // Heap not yet allocated. Load indirectly. + int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true); + ld(R30, simm16_offset, R30); } } @@ -3060,35 +3070,27 @@ if (!VerifyOops) { return; } - // Will be preserved. - Register tmp = R11; - assert(oop != tmp, "precondition"); - unsigned int nbytes_save = 10*8; // 10 volatile gprs + address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address(); - // save tmp - mr(R0, tmp); - // kill tmp - save_LR_CR(tmp); + const Register tmp = R11; // Will be preserved. + const int nbytes_save = 11*8; // Volatile gprs except R0. + save_volatile_gprs(R1_SP, -nbytes_save); // except R0 + + if (oop == tmp) mr(R4_ARG2, oop); + save_LR_CR(tmp); // save in old frame push_frame_reg_args(nbytes_save, tmp); - // restore tmp - mr(tmp, R0); - save_volatile_gprs(R1_SP, 112); // except R0 // load FunctionDescriptor** / entry_address * - load_const(tmp, fd); + load_const_optimized(tmp, fd, R0); // load FunctionDescriptor* / entry_address ld(tmp, 0, tmp); - mr(R4_ARG2, oop); - load_const(R3_ARG1, (address)msg); - // call destination for its side effect + if (oop != tmp) mr_if_needed(R4_ARG2, oop); + load_const_optimized(R3_ARG1, (address)msg, R0); + // Call destination for its side effect. 
call_c(tmp); - restore_volatile_gprs(R1_SP, 112); // except R0 + pop_frame(); - // save tmp - mr(R0, tmp); - // kill tmp restore_LR_CR(tmp); - // restore tmp - mr(tmp, R0); + restore_volatile_gprs(R1_SP, -nbytes_save); // except R0 } const char* stop_types[] = { @@ -3099,7 +3101,7 @@ }; static void stop_on_request(int tp, const char* msg) { - tty->print("PPC assembly code requires stop: (%s) %s\n", (void *)stop_types[tp%/*stop_end*/4], msg); + tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg); guarantee(false, err_msg("PPC assembly code requires stop: %s", msg)); } --- ./hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -368,6 +368,7 @@ void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); + void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true); void call_VM_leaf(address entry_point); void call_VM_leaf(address entry_point, Register arg_1); void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); --- ./hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -464,7 +464,7 @@ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23"; tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT, - adaptername, mh_reg_name, (intptr_t) mh, entry_sp); + adaptername, mh_reg_name, (intptr_t) mh, (intptr_t) entry_sp); if (Verbose) { tty->print_cr("Registers:"); --- ./hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,10 +100,7 @@ MacroAssembler* a = new MacroAssembler(&cb); // Patch the call. - if (ReoptimizeCallSequences && - a->is_within_range_of_b(dest, addr_call)) { - a->bl(dest); - } else { + if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) { address trampoline_stub_addr = get_trampoline(); // We did not find a trampoline stub because the current codeblob @@ -115,9 +112,12 @@ // Patch the constant in the call's trampoline stub. 
NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest); + dest = trampoline_stub_addr; + } - a->bl(trampoline_stub_addr); - } + OrderAccess::release(); + a->bl(dest); + ICache::ppc64_flush_icache_bytes(addr_call, code_size); } @@ -147,9 +147,9 @@ address addr = addr_at(0); if (!NativeCall::is_call_at(addr)) { - tty->print_cr("not a NativeCall at " PTR_FORMAT, addr); + tty->print_cr("not a NativeCall at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty); - fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeCall at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT @@ -160,9 +160,9 @@ NativeInstruction::verify(); if (!NativeFarCall::is_far_call_at(addr)) { - tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr); + tty->print_cr("not a NativeFarCall at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT @@ -306,9 +306,9 @@ if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) && ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) && ! MacroAssembler::is_bl(*((int*) addr))) { - tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr); + tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr))); } } } @@ -344,9 +344,9 @@ NativeInstruction::verify(); if (!NativeJump::is_jump_at(addr)) { - tty->print_cr("not a NativeJump at " PTR_FORMAT, addr); + tty->print_cr("not a NativeJump at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeJump at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT --- ./hotspot/src/cpu/ppc/vm/ppc.ad Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/ppc.ad Wed Feb 04 12:14:39 2015 -0800 @@ -1249,6 +1249,7 @@ // Emit the trampoline stub which will be related to the branch-and-link below. CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset); + if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full. __ relocate(rtype); } @@ -1329,7 +1330,7 @@ if (!false /* TODO: PPC port C->is_frameless_method()*/) { st->print("save return pc\n\t"); - st->print("push frame %d\n\t", -framesize); + st->print("push frame %ld\n\t", -framesize); } } #endif @@ -1412,7 +1413,7 @@ while (bang_offset <= bang_end) { // Need at least one stack bang at end of shadow zone. - // Again I had to copy code, this time from assembler_ppc64.cpp, + // Again I had to copy code, this time from assembler_ppc.cpp, // bang_stack_with_offset - see there for comments. // Stack grows down, caller passes positive offset. @@ -1937,8 +1938,9 @@ // -------------------------------------------------------------------- // Check for hi bits still needing moving. Only happens for misaligned // arguments to native calls. - if (src_hi == dst_hi) + if (src_hi == dst_hi) { return ppc64Opcode_none; // Self copy; no move. 
+ } ShouldNotReachHere(); return ppc64Opcode_undefined; @@ -1960,14 +1962,15 @@ } uint MachNopNode::size(PhaseRegAlloc *ra_) const { - return _count * 4; + return _count * 4; } #ifndef PRODUCT void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const { int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); - int reg = ra_->get_reg_first(this); - st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset); + char reg_str[128]; + ra_->dump_register(this, reg_str); + st->print("ADDI %s, SP, %d \t// box node", reg_str, offset); } #endif @@ -2002,7 +2005,7 @@ // Inline_cache contains a klass. Register ic_klass = as_Register(Matcher::inline_cache_reg_encode()); - Register receiver_klass = R0; // tmp + Register receiver_klass = R12_scratch2; // tmp assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1); assert(R11_scratch1 == R11, "need prologue scratch register"); @@ -3486,6 +3489,7 @@ // Emit the trampoline stub which will be related to the branch-and-link below. CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset); + if (Compile::current()->env()->failing()) { return; } // Code cache may be full. __ relocate(_optimized_virtual ? relocInfo::opt_virtual_call_type : relocInfo::static_call_type); } @@ -3529,6 +3533,7 @@ // Emit the trampoline stub which will be related to the branch-and-link below. CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset); + if (ra_->C->env()->failing()) { return; } // Code cache may be full. assert(_optimized_virtual, "methodHandle call should be a virtual call"); __ relocate(relocInfo::opt_virtual_call_type); } @@ -3579,9 +3584,7 @@ const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none); const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const); CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset()); - - if (ra_->C->env()->failing()) - return; + if (ra_->C->env()->failing()) { return; } // Code cache may be full. // Build relocation at call site with ic position as data. assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) || @@ -5640,19 +5643,6 @@ ins_pipe(pipe_class_memory); %} -//// Load compressed klass and decode it if narrow_klass_shift == 0. -//// TODO: will narrow_klass_shift ever be 0? -//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{ -// match(Set dst (DecodeNKlass (LoadNKlass mem))); -// predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*); -// ins_cost(MEMORY_REF_COST); -// -// format %{ "LWZ $dst, $mem \t// DecodeNKlass (unscaled)" %} -// size(4); -// ins_encode( enc_lwz(dst, mem) ); -// ins_pipe(pipe_class_memory); -//%} - // Load Klass Pointer instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{ match(Set dst (LoadKlass mem)); @@ -6072,11 +6062,15 @@ %} %} -instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{ +// We have seen a safepoint between the hi and lo parts, and this node was handled +// as an oop. Therefore this needs a match rule so that build_oop_map knows this is +// not a narrow oop. 
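Taken together, the rules below materialize a 32-bit narrow klass constant K in halves, with an optional masking step in between; roughly (schematic, not emitted output):

  // LIS    dst, hi16(K)       loadConNKlass_hi: addis sign-extends, so bit 31
  //                           of K can leak 1s into bits 32..63
  // CLRLDI dst, dst, 32       loadConNKlass_mask: added only when K does not
  //                           fit in a 31-bit unsigned immediate
  // ORI    dst, dst, lo16(K)  loadConNKlass_lo

The postalloc expansion at the end of this hunk wires the mask node in exactly that conditional fashion.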
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{ + match(Set dst src); effect(DEF dst, USE src); ins_cost(DEFAULT_COST); - format %{ "LIS $dst, $src \t// narrow oop hi" %} + format %{ "LIS $dst, $src \t// narrow klass hi" %} size(4); ins_encode %{ // TODO: PPC port $archOpcode(ppc64Opcode_addis); @@ -6086,6 +6080,21 @@ ins_pipe(pipe_class_default); %} +// As loadConNKlass_hi this must be recognized as narrow klass, not oop! +instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{ + match(Set dst src1); + effect(TEMP src2); + ins_cost(DEFAULT_COST); + + format %{ "MASK $dst, $src2, 0xFFFFFFFF" %} // mask + size(4); + ins_encode %{ + // TODO: PPC port $archOpcode(ppc64Opcode_rldicl); + __ clrldi($dst$$Register, $src2$$Register, 0x20); + %} + ins_pipe(pipe_class_default); +%} + // This needs a match rule so that build_oop_map knows this is // not a narrow oop. instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{ @@ -6093,10 +6102,10 @@ effect(TEMP src2); ins_cost(DEFAULT_COST); - format %{ "ADDI $dst, $src1, $src2 \t// narrow oop lo" %} - size(4); - ins_encode %{ - // TODO: PPC port $archOpcode(ppc64Opcode_addi); + format %{ "ORI $dst, $src1, $src2 \t// narrow klass lo" %} + size(4); + ins_encode %{ + // TODO: PPC port $archOpcode(ppc64Opcode_ori); intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant); assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder"); int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant); @@ -6127,10 +6136,11 @@ MachNode *m2 = m1; if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) { // Value might be 1-extended. Mask out these bits. - m2 = new (C) clearMs32bNode(); + m2 = new (C) loadConNKlass_maskNode(); m2->add_req(NULL, m1); m2->_opnds[0] = op_dst; - m2->_opnds[1] = op_dst; + m2->_opnds[1] = op_src; + m2->_opnds[2] = op_dst; ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); nodes->push(m2); } @@ -6975,7 +6985,7 @@ size(4); ins_encode %{ // TODO: PPC port $archOpcode(ppc64Opcode_rldicl); - __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32); + __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32); %} ins_pipe(pipe_class_default); %} --- ./hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,7 +24,6 @@ */ #include "precompiled.hpp" -#include "asm/assembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" @@ -39,9 +38,6 @@ #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/top.hpp" -#ifdef COMPILER2 -#include "opto/runtime.hpp" -#endif #include "runtime/thread.inline.hpp" #define __ _masm-> @@ -216,7 +212,7 @@ { BLOCK_COMMENT("Call frame manager or native entry."); // Call frame manager or native entry. - Register r_new_arg_entry = R14; // PPC_state; + Register r_new_arg_entry = R14; assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr, r_arg_method, r_arg_thread); --- ./hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -90,7 +90,7 @@ // Thread will be loaded to R3_ARG1. // Target class oop is in register R5_ARG3 by convention! 
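The first -/+ pair below fixes a misplaced closing parenthesis: R17_tos and R5_ARG3 sat inside the CAST_FROM_FN_PTR(...) argument list instead of call_VM's. Schematically (fn standing in for the InterpreterRuntime entry point):

  // broken: the registers are arguments of the macro, not of call_VM
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, fn, R17_tos, R5_ARG3));
  // fixed: call_VM receives them and moves them into R4_ARG2/R5_ARG3
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, fn), R17_tos, R5_ARG3);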
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3)); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3); // Above call must not return here since exception pending. DEBUG_ONLY(__ should_not_reach_here();) return entry; @@ -171,12 +171,20 @@ // Compiled code destroys templateTableBase, reload. __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2); + if (state == atos) { + __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2); + } + const Register cache = R11_scratch1; const Register size = R12_scratch2; __ get_cache_and_index_at_bcp(cache, 1, index_size); - // Big Endian (get least significant byte of 64 bit value): + // Get least significant byte of 64 bit value: +#if defined(VM_LITTLE_ENDIAN) + __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache); +#else __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache); +#endif __ sldi(size, size, Interpreter::logStackElementSize); __ add(R15_esp, R15_esp, size); __ dispatch_next(state, step); @@ -857,7 +865,9 @@ // Our signature handlers copy required arguments to the C stack // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13. __ mr(R3_ARG1, R18_locals); +#if !defined(ABI_ELFv2) __ ld(signature_handler_fd, 0, signature_handler_fd); +#endif __ call_stub(signature_handler_fd); @@ -1019,8 +1029,13 @@ // native result across the call. No oop is present. __ mr(R3_ARG1, R16_thread); +#if defined(ABI_ELFv2) + __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), + relocInfo::none); +#else __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans), relocInfo::none); +#endif __ bind(sync_check_done); @@ -1219,6 +1234,10 @@ __ li(R0, 1); __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); } + + // Argument and return type profiling. + __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4); + // Increment invocation counter and check for overflow. if (inc_counter) { generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue); @@ -1538,6 +1557,8 @@ __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0); if (ProfileInterpreter) { __ set_method_data_pointer_for_bcp(); + __ ld(R11_scratch1, 0, R1_SP); + __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1); } #if INCLUDE_JVMTI Label L_done; @@ -1549,13 +1570,11 @@ // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. 
__ ld(R4_ARG2, 0, R18_locals); - __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), - R4_ARG2, R19_method, R14_bcp); - - __ cmpdi(CCR0, R11_scratch1, 0); + __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false); + __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true); + __ cmpdi(CCR0, R4_ARG2, 0); __ beq(CCR0, L_done); - - __ std(R11_scratch1, wordSize, R15_esp); + __ std(R4_ARG2, wordSize, R15_esp); __ bind(L_done); #endif // INCLUDE_JVMTI __ dispatch_next(vtos); --- ./hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -188,8 +188,12 @@ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); assert(load_bc_into_bc_reg, "we use bc_reg as temp"); __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1); - // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF + // ((*(cache+indices))>>((1+byte_no)*8))&0xFF: +#if defined(VM_LITTLE_ENDIAN) + __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp); +#else __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp); +#endif __ cmpwi(CCR0, Rnew_bc, 0); __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); __ beq(CCR0, L_patch_done); @@ -348,7 +352,6 @@ __ sldi(Rscratch1, Rscratch1, LogBytesPerWord); __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer); __ bne(CCR0, notInt); - __ isync(); // Order load of constant wrt. tags. __ lwax(R17_tos, Rcpool, Rscratch1); __ push(itos); __ b(exit); @@ -360,7 +363,6 @@ __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float); __ asm_assert_eq("unexpected type", 0x8765); #endif - __ isync(); // Order load of constant wrt. tags. __ lfsx(F15_ftos, Rcpool, Rscratch1); __ push(ftos); @@ -419,13 +421,11 @@ // Check out Conversions.java for an example. // Also ConstantPool::header_size() is 20, which makes it very difficult // to double-align double on the constant pool. SG, 11/7/97 - __ isync(); // Order load of constant wrt. tags. __ lfdx(F15_ftos, Rcpool, Rindex); __ push(dtos); __ b(Lexit); __ bind(Llong); - __ isync(); // Order load of constant wrt. tags. __ ldx(R17_tos, Rcpool, Rindex); __ push(ltos); @@ -1838,8 +1838,8 @@ __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); // Load lo & hi. - __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr); - __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr); + __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); + __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); // Check for default case (=index outside [low,high]). 
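For reference, the 4-byte-aligned tableswitch payload these get_u4 calls read has the following shape (a conceptual struct, not a HotSpot type; all fields are big-endian in the class file):

  struct TableSwitchPayload {   // illustrative only
    int32_t default_offset;     // read at offset 0 in the default case below
    int32_t low;                // read at BytesPerInt above
    int32_t high;               // read at 2*BytesPerInt above
    int32_t offsets[1];         // high - low + 1 branch offsets follow
  };

The comparisons that follow implement the index < low || index > high default test.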
__ cmpw(CCR0, R17_tos, Rlow_byte); @@ -1853,12 +1853,17 @@ __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); __ sldi(Rindex, Rindex, LogBytesPerInt); __ addi(Rindex, Rindex, 3 * BytesPerInt); +#if defined(VM_LITTLE_ENDIAN) + __ lwbrx(Roffset, Rdef_offset_addr, Rindex); + __ extsw(Roffset, Roffset); +#else __ lwax(Roffset, Rdef_offset_addr, Rindex); +#endif __ b(Ldispatch); __ bind(Ldefault_case); __ profile_switch_default(Rhigh_byte, Rscratch1); - __ lwa(Roffset, 0, Rdef_offset_addr); + __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); __ bind(Ldispatch); @@ -1874,12 +1879,11 @@ // Table switch using linear search through cases. // Bytecode stream format: // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... -// Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value. +// Note: Everything is big-endian format here. void TemplateTable::fast_linearswitch() { transition(itos, vtos); - Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case; - + Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; Register Rcount = R3_ARG1, Rcurrent_pair = R4_ARG2, Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. @@ -1893,47 +1897,40 @@ __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); // Setup loop counter and limit. - __ lwz(Rcount, BytesPerInt, Rdef_offset_addr); // Load count. + __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. - // Set up search loop. + __ mtctr(Rcount); __ cmpwi(CCR0, Rcount, 0); - __ beq(CCR0, Ldefault_case); - - __ mtctr(Rcount); - - // linear table search - __ bind(Lsearch_loop); - - __ lwz(Rvalue, 0, Rcurrent_pair); - __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair); - - __ cmpw(CCR0, Rvalue, Rcmp_value); - __ beq(CCR0, Lfound); - - __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); - __ bdnz(Lsearch_loop); - - // default case + __ bne(CCR0, Lloop_entry); + + // Default case __ bind(Ldefault_case); - - __ lwa(Roffset, 0, Rdef_offset_addr); + __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); if (ProfileInterpreter) { __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); - __ b(Lcontinue_execution); } - - // Entry found, skip Roffset bytecodes and continue. - __ bind(Lfound); + __ b(Lcontinue_execution); + + // Next iteration + __ bind(Lsearch_loop); + __ bdz(Ldefault_case); + __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); + __ bind(Lloop_entry); + __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); + __ cmpw(CCR0, Rvalue, Rcmp_value); + __ bne(CCR0, Lsearch_loop); + + // Found, load offset. + __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); + // Calculate case index and profile + __ mfctr(Rcurrent_pair); if (ProfileInterpreter) { - // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints - // beyond the actual current pair due to the auto update load above! 
- __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr); - __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt); - __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1); + __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); - __ bind(Lcontinue_execution); } + + __ bind(Lcontinue_execution); __ add(R14_bcp, Roffset, R14_bcp); __ dispatch_next(vtos); } @@ -1989,7 +1986,7 @@ // initialize i & j __ li(Ri,0); - __ lwz(Rj, -BytesPerInt, Rarray); + __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned); // and start. Label entry; @@ -2006,7 +2003,11 @@ // i = h; // } __ sldi(Rscratch, Rh, log_entry_size); +#if defined(VM_LITTLE_ENDIAN) + __ lwbrx(Rscratch, Rscratch, Rarray); +#else __ lwzx(Rscratch, Rscratch, Rarray); +#endif // if (key < current value) // Rh = Rj @@ -2038,20 +2039,20 @@ // Ri = value offset __ sldi(Ri, Ri, log_entry_size); __ add(Ri, Ri, Rarray); - __ lwz(Rscratch, 0, Ri); + __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); Label not_found; // Ri = offset offset __ cmpw(CCR0, Rkey, Rscratch); __ beq(CCR0, not_found); // entry not found -> j = default offset - __ lwz(Rj, -2 * BytesPerInt, Rarray); + __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); __ b(default_case); __ bind(not_found); // entry found -> j = offset __ profile_switch_case(Rh, Rj, Rscratch, Rkey); - __ lwz(Rj, BytesPerInt, Ri); + __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); if (ProfileInterpreter) { __ b(continue_execution); @@ -2146,8 +2147,11 @@ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); // We are resolved if the indices offset contains the current bytecode. - // Big Endian: +#if defined(VM_LITTLE_ENDIAN) + __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache); +#else __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache); +#endif // Acquire by cmp-br-isync (see below). __ cmpdi(CCR0, Rscratch, (int)bytecode()); __ beq(CCR0, Lresolved); @@ -3230,6 +3234,8 @@ // Load target. __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); __ ldx(Rtarget_method, Rindex, Rrecv_klass); + // Argument and return type profiling. + __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); } @@ -3313,6 +3319,8 @@ __ null_check_throw(Rrecv, -1, Rscratch1); __ profile_final_call(Rrecv, Rscratch1); + // Argument and return type profiling. + __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); // Do the call. __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2); @@ -3334,6 +3342,8 @@ __ null_check_throw(Rreceiver, -1, R11_scratch1); __ profile_call(R11_scratch1, R12_scratch2); + // Argument and return type profiling. + __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false); __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2); } @@ -3348,6 +3358,8 @@ prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1); __ profile_call(R11_scratch1, R12_scratch2); + // Argument and return type profiling. 
+ __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false); __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2); } @@ -3369,6 +3381,8 @@ // Final call case. __ profile_final_call(Rtemp1, Rscratch); + // Argument and return type profiling. + __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true); // Do the final call - the index (f2) contains the method. __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */); @@ -3420,6 +3434,8 @@ __ cmpdi(CCR0, Rindex, 0); __ beq(CCR0, Lthrow_ame); // Found entry. Jump off! + // Argument and return type profiling. + __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true); __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2); // Vtable entry was NULL => Throw abstract method error. @@ -3473,6 +3489,8 @@ // to be the callsite object the bootstrap method returned. This is passed to a // "link" method which does the dispatch (Most likely just grabs the MH stored // inside the callsite and does an invokehandle). + // Argument and return type profiling. + __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false); __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); } @@ -3499,6 +3517,8 @@ __ profile_final_call(Rrecv, Rscratch1); // Still no call from handle => We call the method handle interpreter here. + // Argument and return type profiling. + __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); } --- ./hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -139,13 +139,44 @@ } assert(AllocatePrefetchLines > 0, "invalid value"); - if (AllocatePrefetchLines < 1) // Set valid value in product VM. + if (AllocatePrefetchLines < 1) { // Set valid value in product VM. AllocatePrefetchLines = 1; // Conservative value. + } - if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) + if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) { AllocatePrefetchStyle = 1; // Fall back if inappropriate. + } assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive"); + + if (UseCRC32Intrinsics) { + if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) + warning("CRC32 intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); + } + + // The AES intrinsic stubs require AES instruction support. 
+ if (UseAES) { + warning("AES instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseAES, false); + } + if (UseAESIntrinsics) { + if (!FLAG_IS_DEFAULT(UseAESIntrinsics)) + warning("AES intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + + if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); + } + if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) { + warning("SHA intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } + } void VM_Version::print_features() { @@ -352,7 +383,7 @@ if (PrintAssembly) { ttyLocker ttyl; - tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", code); + tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code)); Disassembler::decode((u_char*)code, (u_char*)code_end, tty); tty->print_cr("Time loop1 :%f", loop1_seconds); tty->print_cr("Time loop2 :%f", loop2_seconds); @@ -435,7 +466,7 @@ // Print the detection code. if (PrintAssembly) { ttyLocker ttyl; - tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", code); + tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code)); Disassembler::decode((u_char*)code, (u_char*)code_end, tty); } @@ -468,7 +499,7 @@ // Print the detection code. if (PrintAssembly) { ttyLocker ttyl; - tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", code); + tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code)); Disassembler::decode((u_char*)code, (u_char*)code_end, tty); } --- ./hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -123,6 +123,7 @@ fpop2_op3 = 0x35, impdep1_op3 = 0x36, aes3_op3 = 0x36, + sha_op3 = 0x36, alignaddr_op3 = 0x36, faligndata_op3 = 0x36, flog3_op3 = 0x36, @@ -223,7 +224,11 @@ mwtos_opf = 0x119, aes_kexpand0_opf = 0x130, - aes_kexpand2_opf = 0x131 + aes_kexpand2_opf = 0x131, + + sha1_opf = 0x141, + sha256_opf = 0x142, + sha512_opf = 0x143 }; enum op5s { @@ -595,6 +600,11 @@ // AES crypto instructions supported only on certain processors static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); } + // SHA crypto instructions supported only on certain processors + static void sha1_only() { assert( VM_Version::has_sha1(), "This instruction only works on SPARC with SHA1"); } + static void sha256_only() { assert( VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); } + static void sha512_only() { assert( VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); } + // instruction only in VIS1 static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); } @@ -1179,7 +1189,6 @@ u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); } inline void wrfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); } - // VIS1 instructions void alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); } @@ 
-1203,6 +1212,12 @@ void movwtos( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); } void movxtod( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); } + // Crypto SHA instructions + + void sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); } + void sha256() { sha256_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); } + void sha512() { sha512_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); } + // Creation Assembler(CodeBuffer* code) : AbstractAssembler(code) { #ifdef CHECK_DELAY --- ./hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -872,21 +872,19 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { + print_if_not_loaded(x); + // This instruction can be deoptimized in the slow path : use // O0 as result register. const LIR_Opr reg = result_register_for(x->type()); -#ifndef PRODUCT - if (PrintNotLoaded && !x->klass()->is_loaded()) { - tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); - } -#endif + CodeEmitInfo* info = state_for(x, x->state()); LIR_Opr tmp1 = FrameMap::G1_oop_opr; LIR_Opr tmp2 = FrameMap::G3_oop_opr; LIR_Opr tmp3 = FrameMap::G4_oop_opr; LIR_Opr tmp4 = FrameMap::O1_oop_opr; LIR_Opr klass_reg = FrameMap::G5_metadata_opr; - new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info); + new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info); LIR_Opr result = rlock_result(x); __ move(reg, result); } --- ./hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -50,34 +50,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- #define __ _masm. --- ./hotspot/src/cpu/sparc/vm/sparc.ad Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/sparc.ad Wed Feb 04 12:14:39 2015 -0800 @@ -6184,7 +6184,11 @@ ins_cost(DEFAULT_COST * 3/2); format %{ "SET $con,$dst\t! 
non-oop ptr" %} ins_encode %{ - __ set($con$$constant, $dst$$Register); + if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) { + __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register); + } else { + __ set($con$$constant, $dst$$Register); + } %} ins_pipe(loadConP); %} --- ./hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -4575,6 +4575,219 @@ return start; } + address generate_sha1_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // int[] SHA.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F4 + for (i = 0; i < 5; i++) { + __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha1_loop); + // load buf into F8-F22 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + __ sha1(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F4 into state and return + for (i = 0; i < 4; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); + + __ BIND(L_sha1_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha1_unaligned_input_loop); + // load buf into F8-F22 + for (i = 0; i < 9; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + for (i = 0; i < 8; i++) { + __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); + } + __ sha1(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F4 into state and return + for (i = 0; i < 4; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); + + return start; + } + + address generate_sha256_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // int[] SHA2.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F7 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha256_loop); + // load buf into F8-F22 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + __ sha256(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, 
Assembler::lessEqual, Assembler::pt, L_sha256_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F7 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); + + __ BIND(L_sha256_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha256_unaligned_input_loop); + // load buf into F8-F22 + for (i = 0; i < 9; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + for (i = 0; i < 8; i++) { + __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); + } + __ sha256(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F7 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); + + return start; + } + + address generate_sha512_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // long[] SHA5.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F14 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha512_loop); + // load buf into F16-F46 + for (i = 0; i < 16; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); + } + __ sha512(); + if (multi_block) { + __ add(ofs, 128, ofs); + __ add(buf, 128, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F14 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); + + __ BIND(L_sha512_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha512_unaligned_input_loop); + // load buf into F16-F46 + for (i = 0; i < 17; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); + } + for (i = 0; i < 16; i++) { + __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16)); + } + __ sha512(); + if (multi_block) { + __ add(ofs, 128, ofs); + __ add(buf, 128, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F14 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); + + return start; + } + void generate_initial() { // Generates all stubs and initializes the entry points @@ -4647,6 +4860,20 @@ StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); StubRoutines::_cipherBlockChaining_decryptAESCrypt = 
generate_cipherBlockChaining_decryptAESCrypt_Parallel(); } + + // generate SHA1/SHA256/SHA512 intrinsics code + if (UseSHA1Intrinsics) { + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); + } + if (UseSHA256Intrinsics) { + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + } + if (UseSHA512Intrinsics) { + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); + } } --- ./hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -41,7 +41,7 @@ enum /* platform_dependent_constants */ { // %%%%%%%% May be able to shrink this a lot code_size1 = 20000, // simply increase if too small (assembler will crash if too small) - code_size2 = 22000 // simply increase if too small (assembler will crash if too small) + code_size2 = 23000 // simply increase if too small (assembler will crash if too small) }; class Sparc { --- ./hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -37,6 +37,7 @@ int VM_Version::_features = VM_Version::unknown_m; const char* VM_Version::_features_str = ""; +unsigned int VM_Version::_L2_cache_line_size = 0; void VM_Version::initialize() { _features = determine_features(); @@ -197,7 +198,7 @@ } assert(BlockZeroingLowLimit > 0, "invalid value"); - if (has_block_zeroing()) { + if (has_block_zeroing() && cache_line_size > 0) { if (FLAG_IS_DEFAULT(UseBlockZeroing)) { FLAG_SET_DEFAULT(UseBlockZeroing, true); } @@ -207,7 +208,7 @@ } assert(BlockCopyLowLimit > 0, "invalid value"); - if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache + if (has_block_zeroing() && cache_line_size > 0) { // has_blk_init() && is_T4(): core's local L2 cache if (FLAG_IS_DEFAULT(UseBlockCopy)) { FLAG_SET_DEFAULT(UseBlockCopy, true); } @@ -234,7 +235,7 @@ assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size"); char buf[512]; - jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")), (has_hardware_popc() ? ", popc" : ""), (has_vis1() ? ", vis1" : ""), @@ -243,6 +244,9 @@ (has_blk_init() ? ", blk_init" : ""), (has_cbcond() ? ", cbcond" : ""), (has_aes() ? ", aes" : ""), + (has_sha1() ? ", sha1" : ""), + (has_sha256() ? ", sha256" : ""), + (has_sha512() ? ", sha512" : ""), (is_ultra3() ? ", ultra3" : ""), (is_sun4v() ? ", sun4v" : ""), (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")), @@ -301,12 +305,65 @@ } } + // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times + if (has_sha1() || has_sha256() || has_sha512()) { + if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions + if (FLAG_IS_DEFAULT(UseSHA)) { + FLAG_SET_DEFAULT(UseSHA, true); + } + } else { + if (UseSHA) { + warning("SPARC SHA intrinsics require VIS1 instruction support. 
Intrinsics will be disabled."); + FLAG_SET_DEFAULT(UseSHA, false); + } + } + } else if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); + } + + if (!UseSHA) { + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } else { + if (has_sha1()) { + if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); + } + } else if (UseSHA1Intrinsics) { + warning("SHA1 instruction is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + } + if (has_sha256()) { + if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); + } + } else if (UseSHA256Intrinsics) { + warning("SHA256 instruction (for SHA-224 and SHA-256) is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + } + + if (has_sha512()) { + if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA512Intrinsics, true); + } + } else if (UseSHA512Intrinsics) { + warning("SHA512 instruction (for SHA-384 and SHA-512) is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } + if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA, false); + } + } + if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (cache_line_size > ContendedPaddingWidth)) ContendedPaddingWidth = cache_line_size; #ifndef PRODUCT if (PrintMiscellaneous && Verbose) { + tty->print_cr("L2 cache line size: %u", L2_cache_line_size()); tty->print("Allocation"); if (AllocatePrefetchStyle <= 0) { tty->print_cr(": no prefetching"); --- ./hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
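The flag cascade above only promotes defaults: the FLAG_IS_DEFAULT guards leave any explicit -XX:±UseSHA* setting intact, warnings fire only when the user asked for something the CPU cannot do, and UseSHA is cleared again at the end if all three per-algorithm flags end up off. The stubs those flags select (generate_sha{1,256,512}_implCompress above) share one calling convention: O0 holds the byte[] block pointer, O1 the state array, and the multi-block ("MB") variants take the current offset and the limit in O2/O3 and return the updated offset in O0 so the Java caller can resume. A portable model of the MB contract, using a hypothetical per-block compress function sha1_block() (a sketch, not the stub itself):

    #include <cstdint>

    // Hypothetical one-block SHA-1 compression: 64-byte block into 5-word state.
    void sha1_block(const uint8_t* block, uint32_t state[5]);

    // Model of the implCompressMB stub contract: walk [ofs, limit] in 64-byte
    // steps and return the new offset, exactly what the SPARC loop above does.
    int sha1_compress_mb(const uint8_t* buf, uint32_t state[5], int ofs, int limit) {
      do {
        sha1_block(buf, state);
        buf += 64;
        ofs += 64;
      } while (ofs <= limit);
      return ofs;
    }

The single-block variants omit the loop and the return value; the unaligned path differs only in how each block is loaded (alignaddr plus a chain of faligndata merges, reading one extra doubleword). SHA-512 uses the same shape with 128-byte blocks and doubleword state.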
* * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,10 @@ T_family = 16, T1_model = 17, sparc5_instructions = 18, - aes_instructions = 19 + aes_instructions = 19, + sha1_instruction = 20, + sha256_instruction = 21, + sha512_instruction = 22 }; enum Feature_Flag_Set { @@ -77,6 +80,9 @@ T1_model_m = 1 << T1_model, sparc5_instructions_m = 1 << sparc5_instructions, aes_instructions_m = 1 << aes_instructions, + sha1_instruction_m = 1 << sha1_instruction, + sha256_instruction_m = 1 << sha256_instruction, + sha512_instruction_m = 1 << sha512_instruction, generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m, generic_v9_m = generic_v8_m | v9_instructions_m, @@ -90,6 +96,9 @@ static int _features; static const char* _features_str; + static unsigned int _L2_cache_line_size; + static unsigned int L2_cache_line_size() { return _L2_cache_line_size; } + static void print_features(); static int determine_features(); static int platform_features(int features); @@ -129,6 +138,9 @@ static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; } static bool has_sparc5_instr() { return (_features & sparc5_instructions_m) != 0; } static bool has_aes() { return (_features & aes_instructions_m) != 0; } + static bool has_sha1() { return (_features & sha1_instruction_m) != 0; } + static bool has_sha256() { return (_features & sha256_instruction_m) != 0; } + static bool has_sha512() { return (_features & sha512_instruction_m) != 0; } static bool supports_compare_and_exchange() { return has_v9(); } @@ -158,9 +170,8 @@ static const char* cpu_features() { return _features_str; } - static intx prefetch_data_size() { - return is_T4() && !is_T7() ? 32 : 64; // default prefetch block size on sparc - } + // default prefetch block size on sparc + static intx prefetch_data_size() { return L2_cache_line_size(); } // Prefetch static intx prefetch_copy_interval_in_bytes() { --- ./hotspot/src/cpu/x86/vm/assembler_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/assembler_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -3854,6 +3854,15 @@ } // Carry-Less Multiplication Quadword +void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { + assert(VM_Version::supports_clmul(), ""); + int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A); + emit_int8(0x44); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8((unsigned char)mask); +} + +// Carry-Less Multiplication Quadword void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); bool vector256 = false; @@ -4928,6 +4937,26 @@ emit_arith(0x03, 0xC0, dst, src); } +void Assembler::adcxq(Register dst, Register src) { + //assert(VM_Version::supports_adx(), "adx instructions not supported"); + emit_int8((unsigned char)0x66); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_int8(0x0F); + emit_int8(0x38); + emit_int8((unsigned char)0xF6); + emit_int8((unsigned char)(0xC0 | encode)); +} + +void Assembler::adoxq(Register dst, Register src) { + //assert(VM_Version::supports_adx(), "adx instructions not supported"); + emit_int8((unsigned char)0xF3); + int encode = prefixq_and_encode(dst->encoding(), src->encoding()); + emit_int8(0x0F); + emit_int8(0x38); + emit_int8((unsigned char)0xF6); + emit_int8((unsigned char)(0xC0 | encode)); +} + void Assembler::andq(Address dst, int32_t imm32) { InstructionMark im(this); 
prefixq(dst); @@ -5435,6 +5464,26 @@ emit_int8((unsigned char)(0xC0 | encode)); } +void Assembler::mulq(Address src) { + InstructionMark im(this); + prefixq(src); + emit_int8((unsigned char)0xF7); + emit_operand(rsp, src); +} + +void Assembler::mulq(Register src) { + int encode = prefixq_and_encode(src->encoding()); + emit_int8((unsigned char)0xF7); + emit_int8((unsigned char)(0xE0 | encode)); +} + +void Assembler::mulxq(Register dst1, Register dst2, Register src) { + assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); + int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, true, false); + emit_int8((unsigned char)0xF6); + emit_int8((unsigned char)(0xC0 | encode)); +} + void Assembler::negq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); emit_int8((unsigned char)0xF7); @@ -5563,6 +5612,28 @@ emit_int8(imm8); } } + +void Assembler::rorq(Register dst, int imm8) { + assert(isShiftCount(imm8 >> 1), "illegal shift count"); + int encode = prefixq_and_encode(dst->encoding()); + if (imm8 == 1) { + emit_int8((unsigned char)0xD1); + emit_int8((unsigned char)(0xC8 | encode)); + } else { + emit_int8((unsigned char)0xC1); + emit_int8((unsigned char)(0xc8 | encode)); + emit_int8(imm8); + } +} + +void Assembler::rorxq(Register dst, Register src, int imm8) { + assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, true, false); + emit_int8((unsigned char)0xF0); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8(imm8); +} + void Assembler::sarq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); --- ./hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/assembler_x86.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -888,6 +888,14 @@ void addq(Register dst, Address src); void addq(Register dst, Register src); +#ifdef _LP64 + //Add Unsigned Integers with Carry Flag + void adcxq(Register dst, Register src); + + //Add Unsigned Integers with Overflow Flag + void adoxq(Register dst, Register src); +#endif + void addr_nop_4(); void addr_nop_5(); void addr_nop_7(); @@ -1204,19 +1212,20 @@ void idivl(Register src); void divl(Register src); // Unsigned division +#ifdef _LP64 void idivq(Register src); +#endif void imull(Register dst, Register src); void imull(Register dst, Register src, int value); void imull(Register dst, Address src); +#ifdef _LP64 void imulq(Register dst, Register src); void imulq(Register dst, Register src, int value); -#ifdef _LP64 void imulq(Register dst, Address src); #endif - // jcc is the generic conditional branch generator to run- // time routines, jcc is used for branches to labels. 
jcc // takes a branch opcode (cc) and a label (L) and generates @@ -1408,9 +1417,16 @@ void movzwq(Register dst, Register src); #endif + // Unsigned multiply with RAX destination register void mull(Address src); void mull(Register src); +#ifdef _LP64 + void mulq(Address src); + void mulq(Register src); + void mulxq(Register dst1, Register dst2, Register src); +#endif + // Multiply Scalar Double-Precision Floating-Point Values void mulsd(XMMRegister dst, Address src); void mulsd(XMMRegister dst, XMMRegister src); @@ -1541,6 +1557,11 @@ void ret(int imm16); +#ifdef _LP64 + void rorq(Register dst, int imm8); + void rorxq(Register dst, Register src, int imm8); +#endif + void sahf(); void sarl(Register dst, int imm8); @@ -1837,6 +1858,7 @@ void vpbroadcastd(XMMRegister dst, XMMRegister src); // Carry-Less Multiplication Quadword + void pclmulqdq(XMMRegister dst, XMMRegister src, int mask); void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask); // AVX instruction which is used to clear upper 128 bits of YMM registers and --- ./hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1085,14 +1085,11 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { -#ifndef PRODUCT - if (PrintNotLoaded && !x->klass()->is_loaded()) { - tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); - } -#endif + print_if_not_loaded(x); + CodeEmitInfo* info = state_for(x, x->state()); LIR_Opr reg = result_register_for(x->type()); - new_instance(reg, x->klass(), + new_instance(reg, x->klass(), x->is_unresolved(), FrameMap::rcx_oop_opr, FrameMap::rdi_oop_opr, FrameMap::rsi_oop_opr, --- ./hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -675,7 +675,7 @@ case handle_exception_nofpu_id: case handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id); + oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); break; case handle_exception_from_callee_id: { // At this point all registers except exception oop (RAX) and @@ -748,7 +748,7 @@ case handle_exception_nofpu_id: case handle_exception_id: // Restore the registers that were saved at the beginning. - restore_live_registers(sasm, id == handle_exception_nofpu_id); + restore_live_registers(sasm, id != handle_exception_nofpu_id); break; case handle_exception_from_callee_id: // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP --- ./hotspot/src/cpu/x86/vm/compiledIC_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/compiledIC_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -47,34 +47,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. 
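The new adcxq/adoxq encodings above differ only in their mandatory prefix: ADCX is 66 REX.W 0F 38 F6 /r and adds with carry through CF only, while ADOX is F3 REX.W 0F 38 F6 /r and carries through OF only, which is what lets multiply_128_x_128_bmi2_loop further down in this patch run two independent carry chains. A standalone sketch of the byte sequence the emitter produces (register numbers 0..15; ModRM reg=dst, rm=src, matching prefixq_and_encode):

    #include <cstdint>
    #include <cstdio>

    // Emit "adcx dst, src" / "adox dst, src" for 64-bit GPRs 0..15.
    // Mirrors the Assembler::adcxq/adoxq sequences above; buf must hold 6 bytes.
    static int emit_adcx_adox(uint8_t* p, bool adox, int dst, int src) {
      int n = 0;
      p[n++] = adox ? 0xF3 : 0x66;                      // mandatory prefix
      p[n++] = 0x48 | ((dst >= 8) << 2) | (src >= 8);   // REX.W (+R/+B as needed)
      p[n++] = 0x0F; p[n++] = 0x38; p[n++] = 0xF6;      // opcode
      p[n++] = 0xC0 | ((dst & 7) << 3) | (src & 7);     // ModRM, reg=dst, rm=src
      return n;
    }

    int main() {
      uint8_t buf[6];
      int n = emit_adcx_adox(buf, /*adox=*/false, /*rax*/0, /*rbx*/3);
      for (int i = 0; i < n; i++) printf("%02x ", buf[i]);  // 66 48 0f 38 f6 c3
      printf("\n");
    }

The asserts on VM_Version::supports_adx() are left commented out above because the generated code is only reached behind a runtime supports_adx() check.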
- RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- #define __ _masm. --- ./hotspot/src/cpu/x86/vm/globals_x86.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/globals_x86.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -130,16 +130,16 @@ "Use fast-string operation for zeroing: rep stosb") \ \ /* Use Restricted Transactional Memory for lock eliding */ \ - experimental(bool, UseRTMLocking, false, \ + product(bool, UseRTMLocking, false, \ "Enable RTM lock eliding for inflated locks in compiled code") \ \ experimental(bool, UseRTMForStackLocks, false, \ "Enable RTM lock eliding for stack locks in compiled code") \ \ - experimental(bool, UseRTMDeopt, false, \ + product(bool, UseRTMDeopt, false, \ "Perform deopt and recompilation based on RTM abort ratio") \ \ - experimental(uintx, RTMRetryCount, 5, \ + product(uintx, RTMRetryCount, 5, \ "Number of RTM retries on lock abort or busy") \ \ experimental(intx, RTMSpinLoopCount, 100, \ @@ -176,6 +176,8 @@ "Use count trailing zeros instruction") \ \ product(bool, UseBMI1Instructions, false, \ - "Use BMI instructions") - + "Use BMI1 instructions") \ + \ + product(bool, UseBMI2Instructions, false, \ + "Use BMI2 instructions") #endif // CPU_X86_VM_GLOBALS_X86_HPP --- ./hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1769,7 +1769,7 @@ // at [FETCH], below, will never observe a biased encoding (*101b). // If this invariant is not held we risk exclusion (safety) failure. if (UseBiasedLocking && !UseOptoBiasInlining) { - biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters); + biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters); } #if INCLUDE_RTM_OPT @@ -7293,6 +7293,467 @@ bind(L_done); } +#ifdef _LP64 +/** + * Helper for multiply_to_len(). + */ +void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { + addq(dest_lo, src1); + adcq(dest_hi, 0); + addq(dest_lo, src2); + adcq(dest_hi, 0); +} + +/** + * Multiply 64 bit by 64 bit first loop. 
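add2_with_carry above is the flags idiom for a 128-bit accumulate: each adcq(dest_hi, 0) folds the carry flag of the preceding addq into the high half, so the pair of add/adc pairs adds two 64-bit values into one 128-bit (hi:lo) accumulator. The same operation in portable form (a sketch, assuming a compiler with unsigned __int128):

    #include <cstdint>

    // Portable equivalent of add2_with_carry: (hi:lo) += src1; (hi:lo) += src2.
    static inline void add2_with_carry_ref(uint64_t& hi, uint64_t& lo,
                                           uint64_t src1, uint64_t src2) {
      unsigned __int128 acc = ((unsigned __int128)hi << 64) | lo;
      acc += src1;
      acc += src2;
      lo = (uint64_t)acc;
      hi = (uint64_t)(acc >> 64);
    }

The first loop below produces 64 bits of z per iteration the same way: mulq leaves the 128-bit product in rdx:rax, the incoming carry is folded in with addq/adcq, and rdx becomes the next carry.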
+ */ +void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, + Register y, Register y_idx, Register z, + Register carry, Register product, + Register idx, Register kdx) { + // + // jlong carry, x[], y[], z[]; + // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { + // huge_128 product = y[idx] * x[xstart] + carry; + // z[kdx] = (jlong)product; + // carry = (jlong)(product >>> 64); + // } + // z[xstart] = carry; + // + + Label L_first_loop, L_first_loop_exit; + Label L_one_x, L_one_y, L_multiply; + + decrementl(xstart); + jcc(Assembler::negative, L_one_x); + + movq(x_xstart, Address(x, xstart, Address::times_4, 0)); + rorq(x_xstart, 32); // convert big-endian to little-endian + + bind(L_first_loop); + decrementl(idx); + jcc(Assembler::negative, L_first_loop_exit); + decrementl(idx); + jcc(Assembler::negative, L_one_y); + movq(y_idx, Address(y, idx, Address::times_4, 0)); + rorq(y_idx, 32); // convert big-endian to little-endian + bind(L_multiply); + movq(product, x_xstart); + mulq(y_idx); // product(rax) * y_idx -> rdx:rax + addq(product, carry); + adcq(rdx, 0); + subl(kdx, 2); + movl(Address(z, kdx, Address::times_4, 4), product); + shrq(product, 32); + movl(Address(z, kdx, Address::times_4, 0), product); + movq(carry, rdx); + jmp(L_first_loop); + + bind(L_one_y); + movl(y_idx, Address(y, 0)); + jmp(L_multiply); + + bind(L_one_x); + movl(x_xstart, Address(x, 0)); + jmp(L_first_loop); + + bind(L_first_loop_exit); +} + +/** + * Multiply 64 bit by 64 bit and add 128 bit. + */ +void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, + Register yz_idx, Register idx, + Register carry, Register product, int offset) { + // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; + // z[kdx] = (jlong)product; + + movq(yz_idx, Address(y, idx, Address::times_4, offset)); + rorq(yz_idx, 32); // convert big-endian to little-endian + movq(product, x_xstart); + mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) + movq(yz_idx, Address(z, idx, Address::times_4, offset)); + rorq(yz_idx, 32); // convert big-endian to little-endian + + add2_with_carry(rdx, product, carry, yz_idx); + + movl(Address(z, idx, Address::times_4, offset+4), product); + shrq(product, 32); + movl(Address(z, idx, Address::times_4, offset), product); + +} + +/** + * Multiply 128 bit by 128 bit. Unrolled inner loop. 
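In the unrolled loop below, jdx counts full iterations over four 32-bit limbs (two 64-bit multiplies each), and the code after L_third_loop_exit mops up the remaining idx & 3 limbs. A quick standalone check of that split, mirroring the movl/andl/shrl sequence:

    #include <cstdio>

    // jdx/tail split used by the unrolled third loop: movl(jdx, idx);
    // andl(jdx, 0xFFFFFFFC); shrl(jdx, 2); leaves jdx = idx / 4.
    int main() {
      for (int idx = 0; idx <= 9; idx++) {
        int jdx  = (idx & ~3) >> 2;  // full 4-limb iterations
        int tail = idx & 3;          // limbs left for the epilogue
        printf("idx=%d -> jdx=%d tail=%d\n", idx, jdx, tail);
      }
    }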
+ */ +void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, + Register yz_idx, Register idx, Register jdx, + Register carry, Register product, + Register carry2) { + // jlong carry, x[], y[], z[]; + // int kdx = ystart+1; + // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop + // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; + // z[kdx+idx+1] = (jlong)product; + // jlong carry2 = (jlong)(product >>> 64); + // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; + // z[kdx+idx] = (jlong)product; + // carry = (jlong)(product >>> 64); + // } + // idx += 2; + // if (idx > 0) { + // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; + // z[kdx+idx] = (jlong)product; + // carry = (jlong)(product >>> 64); + // } + // + + Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; + + movl(jdx, idx); + andl(jdx, 0xFFFFFFFC); + shrl(jdx, 2); + + bind(L_third_loop); + subl(jdx, 1); + jcc(Assembler::negative, L_third_loop_exit); + subl(idx, 4); + + multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); + movq(carry2, rdx); + + multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); + movq(carry, rdx); + jmp(L_third_loop); + + bind (L_third_loop_exit); + + andl (idx, 0x3); + jcc(Assembler::zero, L_post_third_loop_done); + + Label L_check_1; + subl(idx, 2); + jcc(Assembler::negative, L_check_1); + + multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); + movq(carry, rdx); + + bind (L_check_1); + addl (idx, 0x2); + andl (idx, 0x1); + subl(idx, 1); + jcc(Assembler::negative, L_post_third_loop_done); + + movl(yz_idx, Address(y, idx, Address::times_4, 0)); + movq(product, x_xstart); + mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) + movl(yz_idx, Address(z, idx, Address::times_4, 0)); + + add2_with_carry(rdx, product, yz_idx, carry); + + movl(Address(z, idx, Address::times_4, 0), product); + shrq(product, 32); + + shlq(rdx, 32); + orq(product, rdx); + movq(carry, product); + + bind(L_post_third_loop_done); +} + +/** + * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
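The BMI2 variant below leans on three properties: mulx takes its first factor implicitly in rdx and writes the full 128-bit product to two explicit registers, rorx rotates without clobbering flags, and neither touches CF or OF, so the adcx (CF-only) and adox (OF-only) chains can stay live across them. mulx semantics in portable form (a sketch, unsigned __int128 assumed):

    #include <cstdint>

    // mulxq(dst_hi, dst_lo, src): dst_hi:dst_lo = rdx * src, flags untouched.
    static inline void mulx_ref(uint64_t rdx_val, uint64_t src,
                                uint64_t& dst_hi, uint64_t& dst_lo) {
      unsigned __int128 p = (unsigned __int128)rdx_val * src;
      dst_lo = (uint64_t)p;
      dst_hi = (uint64_t)(p >> 64);
    }

The movl(carry, 0) in the ADX path is the same trick: a mov is flag-neutral, so zeroing the register and then adcx/adox-ing it into carry2 harvests both pending carry flags without disturbing either chain.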
+ * + */ +void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, + Register carry, Register carry2, + Register idx, Register jdx, + Register yz_idx1, Register yz_idx2, + Register tmp, Register tmp3, Register tmp4) { + assert(UseBMI2Instructions, "should be used only when BMI2 is available"); + + // jlong carry, x[], y[], z[]; + // int kdx = ystart+1; + // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop + // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; + // jlong carry2 = (jlong)(tmp3 >>> 64); + // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; + // carry = (jlong)(tmp4 >>> 64); + // z[kdx+idx+1] = (jlong)tmp3; + // z[kdx+idx] = (jlong)tmp4; + // } + // idx += 2; + // if (idx > 0) { + // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; + // z[kdx+idx] = (jlong)yz_idx1; + // carry = (jlong)(yz_idx1 >>> 64); + // } + // + + Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; + + movl(jdx, idx); + andl(jdx, 0xFFFFFFFC); + shrl(jdx, 2); + + bind(L_third_loop); + subl(jdx, 1); + jcc(Assembler::negative, L_third_loop_exit); + subl(idx, 4); + + movq(yz_idx1, Address(y, idx, Address::times_4, 8)); + rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian + movq(yz_idx2, Address(y, idx, Address::times_4, 0)); + rorxq(yz_idx2, yz_idx2, 32); + + mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 + mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp + + movq(yz_idx1, Address(z, idx, Address::times_4, 8)); + rorxq(yz_idx1, yz_idx1, 32); + movq(yz_idx2, Address(z, idx, Address::times_4, 0)); + rorxq(yz_idx2, yz_idx2, 32); + + if (VM_Version::supports_adx()) { + adcxq(tmp3, carry); + adoxq(tmp3, yz_idx1); + + adcxq(tmp4, tmp); + adoxq(tmp4, yz_idx2); + + movl(carry, 0); // does not affect flags + adcxq(carry2, carry); + adoxq(carry2, carry); + } else { + add2_with_carry(tmp4, tmp3, carry, yz_idx1); + add2_with_carry(carry2, tmp4, tmp, yz_idx2); + } + movq(carry, carry2); + + movl(Address(z, idx, Address::times_4, 12), tmp3); + shrq(tmp3, 32); + movl(Address(z, idx, Address::times_4, 8), tmp3); + + movl(Address(z, idx, Address::times_4, 4), tmp4); + shrq(tmp4, 32); + movl(Address(z, idx, Address::times_4, 0), tmp4); + + jmp(L_third_loop); + + bind (L_third_loop_exit); + + andl (idx, 0x3); + jcc(Assembler::zero, L_post_third_loop_done); + + Label L_check_1; + subl(idx, 2); + jcc(Assembler::negative, L_check_1); + + movq(yz_idx1, Address(y, idx, Address::times_4, 0)); + rorxq(yz_idx1, yz_idx1, 32); + mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 + movq(yz_idx2, Address(z, idx, Address::times_4, 0)); + rorxq(yz_idx2, yz_idx2, 32); + + add2_with_carry(tmp4, tmp3, carry, yz_idx2); + + movl(Address(z, idx, Address::times_4, 4), tmp3); + shrq(tmp3, 32); + movl(Address(z, idx, Address::times_4, 0), tmp3); + movq(carry, tmp4); + + bind (L_check_1); + addl (idx, 0x2); + andl (idx, 0x1); + subl(idx, 1); + jcc(Assembler::negative, L_post_third_loop_done); + movl(tmp4, Address(y, idx, Address::times_4, 0)); + mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 + movl(tmp4, Address(z, idx, Address::times_4, 0)); + + add2_with_carry(carry2, tmp3, tmp4, carry); + + movl(Address(z, idx, Address::times_4, 0), tmp3); + shrq(tmp3, 32); + + shlq(carry2, 32); + orq(tmp3, carry2); + movq(carry, tmp3); + + bind(L_post_third_loop_done); +} + +/** + * Code for BigInteger::multiplyToLen() instrinsic. 
+ * + * rdi: x + * rax: xlen + * rsi: y + * rcx: ylen + * r8: z + * r11: zlen + * r12: tmp1 + * r13: tmp2 + * r14: tmp3 + * r15: tmp4 + * rbx: tmp5 + * + */ +void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, + Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { + ShortBranchVerifier sbv(this); + assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); + + push(tmp1); + push(tmp2); + push(tmp3); + push(tmp4); + push(tmp5); + + push(xlen); + push(zlen); + + const Register idx = tmp1; + const Register kdx = tmp2; + const Register xstart = tmp3; + + const Register y_idx = tmp4; + const Register carry = tmp5; + const Register product = xlen; + const Register x_xstart = zlen; // reuse register + + // First Loop. + // + // final static long LONG_MASK = 0xffffffffL; + // int xstart = xlen - 1; + // int ystart = ylen - 1; + // long carry = 0; + // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { + // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; + // z[kdx] = (int)product; + // carry = product >>> 32; + // } + // z[xstart] = (int)carry; + // + + movl(idx, ylen); // idx = ylen; + movl(kdx, zlen); // kdx = xlen+ylen; + xorq(carry, carry); // carry = 0; + + Label L_done; + + movl(xstart, xlen); + decrementl(xstart); + jcc(Assembler::negative, L_done); + + multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); + + Label L_second_loop; + testl(kdx, kdx); + jcc(Assembler::zero, L_second_loop); + + Label L_carry; + subl(kdx, 1); + jcc(Assembler::zero, L_carry); + + movl(Address(z, kdx, Address::times_4, 0), carry); + shrq(carry, 32); + subl(kdx, 1); + + bind(L_carry); + movl(Address(z, kdx, Address::times_4, 0), carry); + + // Second and third (nested) loops. 
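Limb for limb, the routine implements the textbook loops quoted in these comments; here is a portable reference over 32-bit limbs (a sketch of the Java-level algorithm, not of the stub's 64-bit register scheduling, which consumes two limbs per multiply):

    #include <cstdint>

    // Classic grade-school multiplication, most-significant limb first, as in
    // BigInteger.multiplyToLen. Assumes xlen >= 1, ylen >= 1, and that z holds
    // xlen + ylen limbs.
    static void multiply_to_len_ref(const uint32_t* x, int xlen,
                                    const uint32_t* y, int ylen, uint32_t* z) {
      uint64_t carry = 0;
      // First loop: z[xlen .. xlen+ylen-1] = x[xlen-1] * y.
      for (int j = ylen - 1, k = xlen + ylen - 1; j >= 0; j--, k--) {
        uint64_t product = (uint64_t)y[j] * x[xlen - 1] + carry;
        z[k] = (uint32_t)product;
        carry = product >> 32;
      }
      z[xlen - 1] = (uint32_t)carry;
      // Second and third (nested) loops: accumulate the remaining rows.
      for (int i = xlen - 2; i >= 0; i--) {
        carry = 0;
        for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
          uint64_t product = (uint64_t)y[j] * x[i] + z[k] + carry;
          z[k] = (uint32_t)product;
          carry = product >> 32;
        }
        z[i] = (uint32_t)carry;
      }
    }

The rorq/rorxq by 32 seen throughout converts each big-endian limb pair into a little-endian 64-bit value before the multiply, per the "convert big-endian to little-endian" comments.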
+ // + // for (int i = xstart-1; i >= 0; i--) { // Second loop + // carry = 0; + // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop + // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + + // (z[k] & LONG_MASK) + carry; + // z[k] = (int)product; + // carry = product >>> 32; + // } + // z[i] = (int)carry; + // } + // + // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx + + const Register jdx = tmp1; + + bind(L_second_loop); + xorl(carry, carry); // carry = 0; + movl(jdx, ylen); // j = ystart+1 + + subl(xstart, 1); // i = xstart-1; + jcc(Assembler::negative, L_done); + + push (z); + + Label L_last_x; + lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j + subl(xstart, 1); // i = xstart-1; + jcc(Assembler::negative, L_last_x); + + if (UseBMI2Instructions) { + movq(rdx, Address(x, xstart, Address::times_4, 0)); + rorxq(rdx, rdx, 32); // convert big-endian to little-endian + } else { + movq(x_xstart, Address(x, xstart, Address::times_4, 0)); + rorq(x_xstart, 32); // convert big-endian to little-endian + } + + Label L_third_loop_prologue; + bind(L_third_loop_prologue); + + push (x); + push (xstart); + push (ylen); + + + if (UseBMI2Instructions) { + multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); + } else { // !UseBMI2Instructions + multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); + } + + pop(ylen); + pop(xlen); + pop(x); + pop(z); + + movl(tmp3, xlen); + addl(tmp3, 1); + movl(Address(z, tmp3, Address::times_4, 0), carry); + subl(tmp3, 1); + jccb(Assembler::negative, L_done); + + shrq(carry, 32); + movl(Address(z, tmp3, Address::times_4, 0), carry); + jmp(L_second_loop); + + // Next infrequent code is moved outside loops. + bind(L_last_x); + if (UseBMI2Instructions) { + movl(rdx, Address(x, 0)); + } else { + movl(x_xstart, Address(x, 0)); + } + jmp(L_third_loop_prologue); + + bind(L_done); + + pop(zlen); + pop(xlen); + + pop(tmp5); + pop(tmp4); + pop(tmp3); + pop(tmp2); + pop(tmp1); +} +#endif + /** * Emits code to update CRC-32 with a byte value according to constants in table * @@ -7316,17 +7777,34 @@ * Fold 128-bit data chunk */ void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { - vpclmulhdq(xtmp, xK, xcrc); // [123:64] - vpclmulldq(xcrc, xK, xcrc); // [63:0] - vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */); - pxor(xcrc, xtmp); + if (UseAVX > 0) { + vpclmulhdq(xtmp, xK, xcrc); // [123:64] + vpclmulldq(xcrc, xK, xcrc); // [63:0] + vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */); + pxor(xcrc, xtmp); + } else { + movdqa(xtmp, xcrc); + pclmulhdq(xtmp, xK); // [123:64] + pclmulldq(xcrc, xK); // [63:0] + pxor(xcrc, xtmp); + movdqu(xtmp, Address(buf, offset)); + pxor(xcrc, xtmp); + } } void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { - vpclmulhdq(xtmp, xK, xcrc); - vpclmulldq(xcrc, xK, xcrc); - pxor(xcrc, xbuf); - pxor(xcrc, xtmp); + if (UseAVX > 0) { + vpclmulhdq(xtmp, xK, xcrc); + vpclmulldq(xcrc, xK, xcrc); + pxor(xcrc, xbuf); + pxor(xcrc, xtmp); + } else { + movdqa(xtmp, xcrc); + pclmulhdq(xtmp, xK); + pclmulldq(xcrc, xK); + pxor(xcrc, xbuf); + pxor(xcrc, xtmp); + } } /** @@ -7444,9 +7922,17 @@ // Fold 128 bits in xmm1 down into 32 bits in crc register. 
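Each fold step above computes (crc_hi clmul K_hi) xor (crc_lo clmul K_lo) xor next_chunk, where clmul is a 64x64 carry-less multiply; the change is that the same math is now also emitted with plain pclmulqdq plus movdqa copies when AVX is unavailable, since pclmulqdq overwrites its destination while vpclmulqdq has a separate one. A bit-level model of one carry-less multiply (a sketch, unsigned __int128 assumed):

    #include <cstdint>

    // Software model of one pclmulqdq lane: polynomial multiply of two 64-bit
    // values over GF(2), i.e. shifts and XORs with no carry propagation.
    static unsigned __int128 clmul64(uint64_t a, uint64_t b) {
      unsigned __int128 r = 0;
      for (int i = 0; i < 64; i++)
        if ((b >> i) & 1)
          r ^= (unsigned __int128)a << i;
      return r;
    }

The imm8 selects the lanes, as the new pclmulldq/pclmulhdq wrappers in macroAssembler_x86.hpp spell out: 0x00 multiplies the low 64 bits of both operands, 0x11 the high 64 bits.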
BIND(L_fold_128b); movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr())); - vpclmulqdq(xmm2, xmm0, xmm1, 0x1); - vpand(xmm3, xmm0, xmm2, false /* vector256 */); - vpclmulqdq(xmm0, xmm0, xmm3, 0x1); + if (UseAVX > 0) { + vpclmulqdq(xmm2, xmm0, xmm1, 0x1); + vpand(xmm3, xmm0, xmm2, false /* vector256 */); + vpclmulqdq(xmm0, xmm0, xmm3, 0x1); + } else { + movdqa(xmm2, xmm0); + pclmulqdq(xmm2, xmm1, 0x1); + movdqa(xmm3, xmm0); + pand(xmm3, xmm2); + pclmulqdq(xmm0, xmm3, 0x1); + } psrldq(xmm1, 8); psrldq(xmm2, 4); pxor(xmm0, xmm1); --- ./hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -966,6 +966,16 @@ void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } void mulss(XMMRegister dst, AddressLiteral src); + // Carry-Less Multiplication Quadword + void pclmulldq(XMMRegister dst, XMMRegister src) { + // 0x00 - multiply lower 64 bits [0:63] + Assembler::pclmulqdq(dst, src, 0x00); + } + void pclmulhdq(XMMRegister dst, XMMRegister src) { + // 0x11 - multiply upper 64 bits [64:127] + Assembler::pclmulqdq(dst, src, 0x11); + } + void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } void sqrtsd(XMMRegister dst, AddressLiteral src); @@ -1211,6 +1221,28 @@ XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, XMMRegister tmp4, Register tmp5, Register result); +#ifdef _LP64 + void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); + void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, + Register y, Register y_idx, Register z, + Register carry, Register product, + Register idx, Register kdx); + void multiply_add_128_x_128(Register x_xstart, Register y, Register z, + Register yz_idx, Register idx, + Register carry, Register product, int offset); + void multiply_128_x_128_bmi2_loop(Register y, Register z, + Register carry, Register carry2, + Register idx, Register jdx, + Register yz_idx1, Register yz_idx2, + Register tmp, Register tmp3, Register tmp4); + void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, + Register yz_idx, Register idx, Register jdx, + Register carry, Register product, + Register carry2); + void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, + Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); +#endif + // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic. void update_byte_crc32(Register crc, Register val, Register table); void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); --- ./hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -3677,6 +3677,70 @@ return start; } + + /** + * Arguments: + * + * Input: + * c_rarg0 - x address + * c_rarg1 - x length + * c_rarg2 - y address + * c_rarg3 - y lenth + * not Win64 + * c_rarg4 - z address + * c_rarg5 - z length + * Win64 + * rsp+40 - z address + * rsp+48 - z length + */ + address generate_multiplyToLen() { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + + address start = __ pc(); + // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) + // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
+ const Register x = rdi; + const Register xlen = rax; + const Register y = rsi; + const Register ylen = rcx; + const Register z = r8; + const Register zlen = r11; + + // Next registers will be saved on stack in multiply_to_len(). + const Register tmp1 = r12; + const Register tmp2 = r13; + const Register tmp3 = r14; + const Register tmp4 = r15; + const Register tmp5 = rbx; + + BLOCK_COMMENT("Entry:"); + __ enter(); // required for proper stackwalking of RuntimeStub frame + +#ifndef _WIN64 + __ movptr(zlen, r9); // Save r9 in r11 - zlen +#endif + setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx + // ylen => rcx, z => r8, zlen => r11 + // r9 and r10 may be used to save non-volatile registers +#ifdef _WIN64 + // last 2 arguments (#4, #5) are on stack on Win64 + __ movptr(z, Address(rsp, 6 * wordSize)); + __ movptr(zlen, Address(rsp, 7 * wordSize)); +#endif + + __ movptr(xlen, rsi); + __ movptr(y, rdx); + __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); + + restore_arg_regs(); + + __ leave(); // required for proper stackwalking of RuntimeStub frame + __ ret(0); + + return start; + } + #undef __ #define __ masm-> @@ -3917,6 +3981,11 @@ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, &StubRoutines::_safefetchN_fault_pc, &StubRoutines::_safefetchN_continuation_pc); +#ifdef COMPILER2 + if (UseMultiplyToLenIntrinsic) { + StubRoutines::_multiplyToLen = generate_multiplyToLen(); + } +#endif } public: --- ./hotspot/src/cpu/x86/vm/vm_version_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/vm_version_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -493,7 +493,7 @@ } char buf[256]; - jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cores_per_cpu(), threads_per_core(), cpu_family(), _model, _stepping, (supports_cmov() ? ", cmov" : ""), @@ -522,7 +522,8 @@ (supports_tscinv_bit() ? ", tscinvbit": ""), (supports_tscinv() ? ", tscinv": ""), (supports_bmi1() ? ", bmi1" : ""), - (supports_bmi2() ? ", bmi2" : "")); + (supports_bmi2() ? ", bmi2" : ""), + (supports_adx() ? 
", adx" : "")); _features_str = strdup(buf); // UseSSE is set to the smaller of what hardware supports and what @@ -568,13 +569,13 @@ FLAG_SET_DEFAULT(UseCLMUL, false); } - if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) { + if (UseCLMUL && (UseSSE > 2)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { UseCRC32Intrinsics = true; } } else if (UseCRC32Intrinsics) { if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) - warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)"); + warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)"); FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); } @@ -590,6 +591,17 @@ FLAG_SET_DEFAULT(UseAESIntrinsics, false); } + if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); + } + if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) { + warning("SHA intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } + // Adjust RTM (Restricted Transactional Memory) flags if (!supports_rtm() && UseRTMLocking) { // Can't continue because UseRTMLocking affects UseBiasedLocking flag @@ -601,6 +613,17 @@ #if INCLUDE_RTM_OPT if (UseRTMLocking) { + if (is_intel_family_core()) { + if ((_model == CPU_MODEL_HASWELL_E3) || + (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) || + (_model == CPU_MODEL_BROADWELL && _stepping < 4)) { + if (!UnlockExperimentalVMOptions) { + vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag."); + } else { + warning("UseRTMLocking is only available as experimental option on this platform."); + } + } + } if (!FLAG_IS_CMDLINE(UseRTMLocking)) { // RTM locking should be used only for applications with // high lock contention. For now we do not use it by default. @@ -675,7 +698,20 @@ } #endif } + +#ifdef _LP64 + if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { + UseMultiplyToLenIntrinsic = true; + } +#else + if (UseMultiplyToLenIntrinsic) { + if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { + warning("multiplyToLen intrinsic is not available in 32-bit VM"); + } + FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false); + } #endif +#endif // COMPILER2 // On new cpus instructions which update whole XMM register should be used // to prevent partial register stall due to dependencies on high half. @@ -803,6 +839,24 @@ } } } + if ((cpu_family() == 0x06) && + ((extended_cpu_model() == 0x36) || // Centerton + (extended_cpu_model() == 0x37) || // Silvermont + (extended_cpu_model() == 0x4D))) { +#ifdef COMPILER2 + if (FLAG_IS_DEFAULT(OptoScheduling)) { + OptoScheduling = true; + } +#endif + if (supports_sse4_2()) { // Silvermont + if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) { + UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus + } + } + } + if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) { + AllocatePrefetchInstr = 3; + } } // Use count leading zeros count instruction if available. @@ -815,23 +869,40 @@ FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false); } + // Use count trailing zeros instruction if available if (supports_bmi1()) { + // tzcnt does not require VEX prefix + if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { + if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) { + // Don't use tzcnt if BMI1 is switched off on command line. 
+ UseCountTrailingZerosInstruction = false; + } else { + UseCountTrailingZerosInstruction = true; + } + } + } else if (UseCountTrailingZerosInstruction) { + warning("tzcnt instruction is not available on this CPU"); + FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); + } + + // BMI instructions (except tzcnt) use an encoding with VEX prefix. + // VEX prefix is generated only when AVX > 0. + if (supports_bmi1() && supports_avx()) { if (FLAG_IS_DEFAULT(UseBMI1Instructions)) { UseBMI1Instructions = true; } } else if (UseBMI1Instructions) { - warning("BMI1 instructions are not available on this CPU"); + warning("BMI1 instructions are not available on this CPU (AVX is also required)"); FLAG_SET_DEFAULT(UseBMI1Instructions, false); } - // Use count trailing zeros instruction if available - if (supports_bmi1()) { - if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { - UseCountTrailingZerosInstruction = UseBMI1Instructions; + if (supports_bmi2() && supports_avx()) { + if (FLAG_IS_DEFAULT(UseBMI2Instructions)) { + UseBMI2Instructions = true; } - } else if (UseCountTrailingZerosInstruction) { - warning("tzcnt instruction is not available on this CPU"); - FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); + } else if (UseBMI2Instructions) { + warning("BMI2 instructions are not available on this CPU (AVX is also required)"); + FLAG_SET_DEFAULT(UseBMI2Instructions, false); } // Use population count instruction if available. @@ -890,23 +961,25 @@ AllocatePrefetchDistance = allocate_prefetch_distance(); AllocatePrefetchStyle = allocate_prefetch_style(); - if( is_intel() && cpu_family() == 6 && supports_sse3() ) { - if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core + if (is_intel() && cpu_family() == 6 && supports_sse3()) { + if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core #ifdef _LP64 AllocatePrefetchDistance = 384; #else AllocatePrefetchDistance = 320; #endif } - if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus + if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus AllocatePrefetchDistance = 192; AllocatePrefetchLines = 4; + } #ifdef COMPILER2 - if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) { + if (supports_sse4_2()) { + if (FLAG_IS_DEFAULT(UseFPUForSpilling)) { FLAG_SET_DEFAULT(UseFPUForSpilling, true); } + } #endif - } } assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value"); --- ./hotspot/src/cpu/x86/vm/vm_version_x86.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/x86/vm/vm_version_x86.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -209,7 +209,9 @@ erms : 1, : 1, rtm : 1, - : 20; + : 7, + adx : 1, + : 12; } bits; }; @@ -260,7 +262,8 @@ CPU_CLMUL = (1 << 21), // carryless multiply for CRC CPU_BMI1 = (1 << 22), CPU_BMI2 = (1 << 23), - CPU_RTM = (1 << 24) // Restricted Transactional Memory instructions + CPU_RTM = (1 << 24), // Restricted Transactional Memory instructions + CPU_ADX = (1 << 25) } cpuFeatureFlags; enum { @@ -276,7 +279,10 @@ CPU_MODEL_WESTMERE_EX = 0x2f, CPU_MODEL_SANDYBRIDGE = 0x2a, CPU_MODEL_SANDYBRIDGE_EP = 0x2d, - CPU_MODEL_IVYBRIDGE_EP = 0x3a + CPU_MODEL_IVYBRIDGE_EP = 0x3a, + CPU_MODEL_HASWELL_E3 = 0x3c, + CPU_MODEL_HASWELL_E7 = 0x3f, + CPU_MODEL_BROADWELL = 0x3d } cpuExtendedFamily; // cpuid information block. All info derived from executing cpuid with @@ -462,10 +468,16 @@ } // Intel features. 
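The new adx bitfield slots into leaf-7 EBX exactly where the reserved gaps place it: erms is bit 9, rtm bit 11, and after seven reserved bits adx lands on bit 19, which is where the SDM defines CPUID.(EAX=07H,ECX=0):EBX.ADX. A standalone probe of the same bit (a sketch assuming a GCC/Clang toolchain that provides __get_cpuid_count in <cpuid.h>):

    #include <cpuid.h>
    #include <cstdio>

    // Read CPUID.(EAX=07H, ECX=0):EBX and test bit 19 (ADX), the same bit the
    // sef_cpuid7_ebx bitfield above exposes as bits.adx.
    static bool cpu_has_adx() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
        return false;
      return (ebx >> 19) & 1;
    }

    int main() {
      printf("adx: %s\n", cpu_has_adx() ? "yes" : "no");
    }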
if(is_intel()) { + if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0) + result |= CPU_ADX; if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0) result |= CPU_BMI2; if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0) result |= CPU_LZCNT; + // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw + if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) { + result |= CPU_3DNOW_PREFETCH; + } } return result; @@ -618,6 +630,7 @@ static bool supports_rtm() { return (_cpuFeatures & CPU_RTM) != 0; } static bool supports_bmi1() { return (_cpuFeatures & CPU_BMI1) != 0; } static bool supports_bmi2() { return (_cpuFeatures & CPU_BMI2) != 0; } + static bool supports_adx() { return (_cpuFeatures & CPU_ADX) != 0; } // Intel features static bool is_intel_family_core() { return is_intel() && extended_cpu_family() == CPU_FAMILY_INTEL_CORE; } --- ./hotspot/src/cpu/zero/vm/compiledIC_zero.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/zero/vm/compiledIC_zero.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -58,34 +58,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { --- ./hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -40,6 +40,7 @@ #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" --- ./hotspot/src/os/aix/vm/os_aix.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/aix/vm/os_aix.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -55,6 +55,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" @@ -113,12 +114,6 @@ } #endif -// Excerpts from systemcfg.h definitions newer than AIX 5.3 -#ifndef PV_7 -# define PV_7 0x200000 // Power PC 7 -# define PV_7_Compat 0x208000 // Power PC 7 -#endif - #define MAX_PATH (2 * K) // for timer info max values which include all bits @@ -129,17 +124,40 @@ #define ERROR_MP_VMGETINFO_FAILED 102 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103 -// the semantics in this file are thus that codeptr_t is a *real 
code ptr* +// The semantics in this file are thus that codeptr_t is a *real code ptr*. // This means that any function taking codeptr_t as arguments will assume // a real codeptr and won't handle function descriptors (eg getFuncName), // whereas functions taking address as args will deal with function -// descriptors (eg os::dll_address_to_library_name) +// descriptors (eg os::dll_address_to_library_name). typedef unsigned int* codeptr_t; -// typedefs for stackslots, stack pointers, pointers to op codes +// Typedefs for stackslots, stack pointers, pointers to op codes. typedef unsigned long stackslot_t; typedef stackslot_t* stackptr_t; +// Excerpts from systemcfg.h definitions newer than AIX 5.3. +#ifndef PV_7 +#define PV_7 0x200000 /* Power PC 7 */ +#define PV_7_Compat 0x208000 /* Power PC 7 */ +#endif +#ifndef PV_8 +#define PV_8 0x300000 /* Power PC 8 */ +#define PV_8_Compat 0x308000 /* Power PC 8 */ +#endif + +#define trcVerbose(fmt, ...) { /* PPC port */ \ + if (Verbose) { \ + fprintf(stderr, fmt, ##__VA_ARGS__); \ + fputc('\n', stderr); fflush(stderr); \ + } \ +} +#define trc(fmt, ...) /* PPC port */ + +#define ERRBYE(s) { \ + trcVerbose(s); \ + return -1; \ +} + // query dimensions of the stack of the calling thread static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size); @@ -171,12 +189,12 @@ return true; } -// macro to check a given stack pointer against given stack limits and to die if test fails +// Macro to check a given stack pointer against given stack limits and to die if test fails. #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \ guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \ } -// macro to check the current stack pointer against given stacklimits +// Macro to check the current stack pointer against given stacklimits. #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \ address sp; \ sp = os::current_stack_pointer(); \ @@ -210,7 +228,7 @@ static pid_t _initial_pid = 0; static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769) static sigset_t SR_sigset; -static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls */ +static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls. julong os::available_memory() { return Aix::available_memory(); @@ -242,7 +260,6 @@ return false; } - // Return true if user is running as root. bool os::have_special_privileges() { @@ -273,8 +290,7 @@ for (int i = 0; i < numFullDisclaimsNeeded; i ++) { if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) { - //if (Verbose) - fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno); + trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno); return false; } p += maxDisclaimSize; @@ -282,8 +298,7 @@ if (lastDisclaimSize > 0) { if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) { - //if (Verbose) - fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno); + trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno); return false; } } @@ -323,11 +338,11 @@ void os::Aix::initialize_system_info() { - // get the number of online(logical) cpus instead of configured + // Get the number of online(logical) cpus instead of configured. os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN); assert(_processor_count > 0, "_processor_count must be > 0"); - // retrieve total physical storage + // Retrieve total physical storage. 
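The trcVerbose macro introduced above replaces the scattered if (Verbose) fprintf blocks that the rest of this change deletes. Note that it relies on the ##__VA_ARGS__ comma-swallowing extension (a GNU extension), so invocations with a bare format string compile too. A minimal standalone model:

    #include <cstdio>

    static bool Verbose = true;

    // Minimal model of trcVerbose: ##__VA_ARGS__ drops the trailing comma
    // when the macro is invoked with a format string only.
    #define trcVerbose(fmt, ...) do { \
        if (Verbose) { \
          fprintf(stderr, fmt, ##__VA_ARGS__); \
          fputc('\n', stderr); fflush(stderr); \
        } \
      } while (0)

    int main() {
      trcVerbose("processor count: %d", 8);  // with arguments
      trcVerbose("uname failed");            // bare format string also compiles
    }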
os::Aix::meminfo_t mi; if (!os::Aix::get_meminfo(&mi)) { fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr); @@ -502,7 +517,6 @@ } // end os::Aix::query_multipage_support() -// The code for this method was initially derived from the version in os_linux.cpp. void os::init_system_properties_values() { #define DEFAULT_LIBPATH "/usr/lib:/lib" @@ -599,10 +613,11 @@ sigaction(sig, (struct sigaction*)NULL, &oact); void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) : CAST_FROM_FN_PTR(void*, oact.sa_handler); - if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) + if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) { return true; - else + } else { return false; + } } void os::Aix::signal_sets_init() { @@ -776,6 +791,9 @@ // get the processor version from _system_configuration switch (_system_configuration.version) { + case PV_8: + strcpy(pci->version, "Power PC 8"); + break; case PV_7: strcpy(pci->version, "Power PC 7"); break; @@ -803,6 +821,9 @@ case PV_7_Compat: strcpy(pci->version, "PV_7_Compat"); break; + case PV_8_Compat: + strcpy(pci->version, "PV_8_Compat"); + break; default: strcpy(pci->version, "unknown"); } @@ -938,7 +959,9 @@ pthread_attr_destroy(&attr); - if (ret != 0) { + if (ret == 0) { + // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid)); + } else { if (PrintMiscellaneous && (Verbose || WizardMode)) { perror("pthread_create()"); } @@ -1095,8 +1118,7 @@ if (os::Aix::on_pase()) { Unimplemented(); return 0; - } - else { + } else { // On AIX use the precision of processors real time clock // or time base registers. timebasestruct_t time; @@ -1149,7 +1171,6 @@ } } - char * os::local_time_string(char *buf, size_t buflen) { struct tm t; time_t long_time; @@ -1187,7 +1208,6 @@ if (abort_hook != NULL) { abort_hook(); } - } // Note: os::abort() might be called very early during initialization, or @@ -1219,8 +1239,7 @@ // from src/solaris/hpi/src/system_md.c size_t os::lasterror(char *buf, size_t len) { - - if (errno == 0) return 0; + if (errno == 0) return 0; const char *s = ::strerror(errno); size_t n = ::strlen(s); @@ -1233,6 +1252,7 @@ } intx os::current_thread_id() { return (intx)pthread_self(); } + int os::current_process_id() { // This implementation returns a unique pid, the pid of the @@ -1369,9 +1389,9 @@ if (offset) { *offset = -1; } - if (buf) { - buf[0] = '\0'; - } + // Buf is not optional, but offset is optional. + assert(buf != NULL, "sanity check"); + buf[0] = '\0'; // Resolve function ptr literals first. addr = resolve_function_descriptor_to_code_pointer(addr); @@ -1404,12 +1424,9 @@ return 0; } - if (Verbose) { - fprintf(stderr, "pc outside any module"); - } + trcVerbose("pc outside any module"); return -1; - } bool os::dll_address_to_library_name(address addr, char* buf, @@ -1417,9 +1434,9 @@ if (offset) { *offset = -1; } - if (buf) { - buf[0] = '\0'; - } + // Buf is not optional, but offset is optional. + assert(buf != NULL, "sanity check"); + buf[0] = '\0'; // Resolve function ptr literals first. addr = resolve_function_descriptor_to_code_pointer(addr); @@ -1434,7 +1451,7 @@ } // Loads .dll/.so and in case of error it checks if .dll/.so was built -// for the same architecture as Hotspot is running on +// for the same architecture as Hotspot is running on. 
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) { if (ebuf && ebuflen > 0) { @@ -1597,7 +1614,6 @@ st->cr(); } - static void print_signal_handler(outputStream* st, int sig, char* buf, size_t buflen); @@ -1621,7 +1637,7 @@ static char saved_jvm_path[MAXPATHLEN] = {0}; -// Find the full path to the current module, libjvm.so or libjvm_g.so +// Find the full path to the current module, libjvm.so. void os::jvm_path(char *buf, jint buflen) { // Error checking. if (buflen < MAXPATHLEN) { @@ -1691,7 +1707,7 @@ // Do not block out synchronous signals in the signal handler. // Blocking synchronous signals only makes sense if you can really // be sure that those signals won't happen during signal handling, - // when the blocking applies. Normal signal handlers are lean and + // when the blocking applies. Normal signal handlers are lean and // do not cause signals. But our signal handlers tend to be "risky" // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen. // On AIX, PASE there was a case where a SIGSEGV happened, followed @@ -2966,13 +2982,9 @@ param.sched_priority = newpri; int ret = pthread_setschedparam(thr, policy, ¶m); - if (Verbose) { - if (ret == 0) { - fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri); - } else { - fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n", - (int)thr, newpri, ret, strerror(ret)); - } + if (ret != 0) { + trcVerbose("Could not change priority for thread %d to %d (error %d, %s)", + (int)thr, newpri, ret, strerror(ret)); } return (ret == 0) ? OS_OK : OS_ERR; } @@ -3093,7 +3105,6 @@ errno = old_errno; } - static int SR_initialize() { struct sigaction act; char *s; @@ -3336,7 +3347,6 @@ JVM_handle_aix_signal(sig, info, uc, true); } - // This boolean allows users to forward their own non-matching signals // to JVM_handle_aix_signal, harmlessly. bool os::Aix::signal_handlers_are_installed = false; @@ -3530,7 +3540,7 @@ set_signal_handler(SIGDANGER, true); if (libjsig_is_loaded) { - // Tell libjsig jvm finishes setting signal handlers + // Tell libjsig jvm finishes setting signal handlers. (*end_signal_setting)(); } @@ -3546,7 +3556,7 @@ tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); check_signals = false; } - // need to initialize check_signal_done + // Need to initialize check_signal_done. ::sigemptyset(&check_signal_done); } } @@ -3620,7 +3630,6 @@ st->cr(); } - #define DO_SIGNAL_CHECK(sig) \ if (!sigismember(&check_signal_done, sig)) \ os::Aix::check_signal_handler(sig) @@ -3681,7 +3690,6 @@ ? CAST_FROM_FN_PTR(address, act.sa_sigaction) : CAST_FROM_FN_PTR(address, act.sa_handler); - switch(sig) { case SIGSEGV: case SIGBUS: @@ -3829,15 +3837,13 @@ pthread_mutex_init(&dl_mutex, NULL); } -// this is called _after_ the global arguments have been parsed +// This is called _after_ the global arguments have been parsed. jint os::init_2(void) { - if (Verbose) { - fprintf(stderr, "processor count: %d\n", os::_processor_count); - fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory); - } - - // initially build up the loaded dll map + trcVerbose("processor count: %d", os::_processor_count); + trcVerbose("physical memory: %lu", Aix::_physical_memory); + + // Initially build up the loaded dll map. LoadedLibraries::reload(); const int page_size = Aix::page_size(); @@ -3887,7 +3893,7 @@ } if (map_address != (address) MAP_FAILED) { - // map succeeded, but polling_page is not at wished address, unmap and continue. 
+ // Map succeeded, but polling_page is not at wished address, unmap and continue. ::munmap(map_address, map_size); map_address = (address) MAP_FAILED; } @@ -3941,7 +3947,7 @@ // Make the stack size a multiple of the page size so that // the yellow/red zones can be guarded. - // note that this can be 0, if no default stacksize was set + // Note that this can be 0, if no default stacksize was set. JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size())); Aix::libpthread_init(); @@ -4254,7 +4260,6 @@ return fd; } - // create binary file, rewriting existing file if required int os::create_binary_file(const char* path, bool rewrite_existing) { int oflags = O_WRONLY | O_CREAT; @@ -4323,7 +4328,6 @@ return NULL; } - // Remap a block of memory. char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, @@ -4371,14 +4375,14 @@ jlong sys_time = 0; jlong user_time = 0; - // reimplemented using getthrds64(). + // Reimplemented using getthrds64(). // - // goes like this: + // Works like this: // For the thread in question, get the kernel thread id. Then get the // kernel thread statistics using that id. // // This only works of course when no pthread scheduling is used, - // ie there is a 1:1 relationship to kernel threads. + // i.e. there is a 1:1 relationship to kernel threads. // On AIX, see AIXTHREAD_SCOPE variable. pthread_t pthtid = thread->osthread()->pthread_id(); @@ -4525,14 +4529,12 @@ memset(&uts, 0, sizeof(uts)); strcpy(uts.sysname, "?"); if (::uname(&uts) == -1) { - fprintf(stderr, "uname failed (%d)\n", errno); + trc("uname failed (%d)", errno); guarantee(0, "Could not determine whether we run on AIX or PASE"); } else { - if (Verbose) { - fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" " - "node \"%s\" machine \"%s\"\n", - uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); - } + trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" " + "node \"%s\" machine \"%s\"\n", + uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); const int major = atoi(uts.version); assert(major > 0, "invalid OS version"); const int minor = atoi(uts.release); @@ -4544,12 +4546,10 @@ // We run on AIX. We do not support versions older than AIX 5.3. _on_pase = 0; if (_os_version < 0x0503) { - fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n"); + trc("AIX release older than AIX 5.3 not supported."); assert(false, "AIX release too old."); } else { - if (Verbose) { - fprintf(stderr, "We run on AIX %d.%d\n", major, minor); - } + trcVerbose("We run on AIX %d.%d\n", major, minor); } } else { assert(false, "unknown OS"); @@ -4557,7 +4557,6 @@ } guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); - } // end: os::Aix::initialize_os_info() // Scan environment for important settings which might effect the VM. @@ -4595,12 +4594,10 @@ // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before // exec() ? before loading the libjvm ? ....) p = ::getenv("XPG_SUS_ENV"); - if (Verbose) { - fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : ""); - } + trcVerbose("XPG_SUS_ENV=%s.", p ? p : ""); if (p && strcmp(p, "ON") == 0) { _xpg_sus_mode = 1; - fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n"); + trc("Unsupported setting: XPG_SUS_ENV=ON"); // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to // clobber address ranges. 
If we ever want to support that, we have to do some // testing first. @@ -4612,10 +4609,7 @@ // Switch off AIX internal (pthread) guard pages. This has // immediate effect for any pthread_create calls which follow. p = ::getenv("AIXTHREAD_GUARDPAGES"); - if (Verbose) { - fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : ""); - fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n"); - } + trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : ""); rc = ::putenv("AIXTHREAD_GUARDPAGES=0"); guarantee(rc == 0, ""); @@ -4633,7 +4627,7 @@ assert(os::Aix::on_aix(), "AIX only"); if (!libperfstat::init()) { - fprintf(stderr, "libperfstat initialization failed.\n"); + trc("libperfstat initialization failed."); assert(false, "libperfstat initialization failed"); } else { if (Verbose) { @@ -4805,7 +4799,6 @@ return abstime; } - // Test-and-clear _Event, always leaves _Event set to 0, returns immediately. // Conceptually TryPark() should be equivalent to park(0). @@ -4888,7 +4881,7 @@ while (_Event < 0) { status = pthread_cond_timedwait(_cond, _mutex, &abst); assert_status(status == 0 || status == ETIMEDOUT, - status, "cond_timedwait"); + status, "cond_timedwait"); if (!FilterSpuriousWakeups) break; // previous semantics if (status == ETIMEDOUT) break; // We consume and ignore EINTR and spurious wakeups. @@ -5022,9 +5015,9 @@ // Optional fast-path check: // Return immediately if a permit is available. if (_counter > 0) { - _counter = 0; - OrderAccess::fence(); - return; + _counter = 0; + OrderAccess::fence(); + return; } Thread* thread = Thread::current(); @@ -5046,7 +5039,6 @@ unpackTime(&absTime, isAbsolute, time); } - // Enter safepoint region // Beware of deadlocks such as 6317397. // The per-thread Parker:: mutex is a classic leaf-lock. @@ -5134,7 +5126,6 @@ } } - extern char** environ; // Run the specified command in a separate process. Return its exit value, @@ -5153,44 +5144,43 @@ } else if (pid == 0) { // child process - // try to be consistent with system(), which uses "/usr/bin/sh" on AIX + // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX. execve("/usr/bin/sh", argv, environ); // execve failed _exit(-1); - } else { + } else { // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't // care about the actual exit code, for now. int status; - // Wait for the child process to exit. This returns immediately if + // Wait for the child process to exit. This returns immediately if // the child has already exited. */ while (waitpid(pid, &status, 0) < 0) { - switch (errno) { + switch (errno) { case ECHILD: return 0; case EINTR: break; default: return -1; - } + } } if (WIFEXITED(status)) { - // The child exited normally; get its exit code. - return WEXITSTATUS(status); + // The child exited normally; get its exit code. + return WEXITSTATUS(status); } else if (WIFSIGNALED(status)) { - // The child exited because of a signal - // The best value to return is 0x80 + signal number, - // because that is what all Unix shells do, and because - // it allows callers to distinguish between process exit and - // process death by signal. - return 0x80 + WTERMSIG(status); + // The child exited because of a signal. + // The best value to return is 0x80 + signal number, + // because that is what all Unix shells do, and because + // it allows callers to distinguish between process exit and + // process death by signal. + return 0x80 + WTERMSIG(status); } else { - // Unknown exit code; pass it through - return status; + // Unknown exit code; pass it through. 
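The exit-status convention spelled out in the comments above distills into a small helper; a hedged sketch assuming POSIX waitpid semantics (shell_style_exit_code is a hypothetical name):

#include <sys/wait.h>

// Decode a waitpid() status the way the code above does: normal exits keep
// their code, signal deaths become 0x80 + signal number (the common shell
// convention), and anything else is passed through unchanged.
static int shell_style_exit_code(int status) {
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);
  } else if (WIFSIGNALED(status)) {
    return 0x80 + WTERMSIG(status);
  }
  return status;
}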
+ return status; } } - // Remove warning. return -1; } @@ -5205,7 +5195,7 @@ struct stat statbuf; char buf[MAXPATHLEN]; char libmawtpath[MAXPATHLEN]; - const char *xawtstr = "/xawt/libmawt.so"; + const char *xawtstr = "/xawt/libmawt.so"; const char *new_xawtstr = "/libawt_xawt.so"; char *p; --- ./hotspot/src/os/aix/vm/os_aix.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/aix/vm/os_aix.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -209,7 +209,7 @@ return _can_use_16M_pages == 1 ? true : false; } - static address ucontext_get_pc(ucontext_t* uc); + static address ucontext_get_pc(const ucontext_t* uc); static intptr_t* ucontext_get_sp(ucontext_t* uc); static intptr_t* ucontext_get_fp(ucontext_t* uc); // Set PC into context. Needed for continuation after signal. --- ./hotspot/src/os/aix/vm/os_aix.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/aix/vm/os_aix.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,12 +26,9 @@ #ifndef OS_AIX_VM_OS_AIX_INLINE_HPP #define OS_AIX_VM_OS_AIX_INLINE_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" -#ifdef TARGET_OS_ARCH_aix_ppc -# include "atomic_aix_ppc.inline.hpp" -# include "orderAccess_aix_ppc.inline.hpp" -#endif // System includes --- ./hotspot/src/os/aix/vm/perfMemory_aix.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/aix/vm/perfMemory_aix.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -31,6 +31,7 @@ #include "os_aix.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/perfMemory.hpp" +#include "services/memTracker.hpp" #include "utilities/exceptions.hpp" // put OS-includes here @@ -196,12 +197,37 @@ return pid; } +// Check if the given statbuf is considered a secure directory for +// the backing store files. Returns true if the directory is considered +// a secure location. Returns false if the statbuf is a symbolic link or +// if an error occurred. +static bool is_statbuf_secure(struct stat *statp) { + if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) { + // The path represents a link or some non-directory file type, + // which is not what we expected. Declare it insecure. + // + return false; + } + // We have an existing directory, check if the permissions are safe. + if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) { + // The directory is open for writing and could be subjected + // to a symlink or a hard link attack. Declare it insecure. + return false; + } + // See if the uid of the directory matches the effective uid of the process. + // + if (statp->st_uid != geteuid()) { + // The directory was not created by this user, declare it insecure. + return false; + } + return true; +} -// check if the given path is considered a secure directory for + +// Check if the given path is considered a secure directory for // the backing store files. Returns true if the directory exists // and is considered a secure location. Returns false if the path // is a symbolic link or if an error occurred. -// static bool is_directory_secure(const char* path) { struct stat statbuf; int result = 0; @@ -211,38 +237,276 @@ return false; } - // the path exists, now check it's mode - if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) { - // the path represents a link or some non-directory file type, - // which is not what we expected. declare it insecure. - // + // The path exists, see if it is secure. + return is_statbuf_secure(&statbuf); +} + +// (Taken over from Solaris to support the O_NOFOLLOW case on AIX.) 
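A path-based stat() check can race with an attacker swapping the directory between the check and its use; validating an already-open descriptor, as the fd-based variant below does, pins the object first. A minimal sketch of that idea (open_and_check_dir is a hypothetical helper, not the patch's API):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

// Open first, then validate what was actually opened: fstat() inspects the
// descriptor, so the result cannot be invalidated afterwards by renaming
// or re-creating the path.
static int open_and_check_dir(const char* path) {
  int fd = ::open(path, O_RDONLY);
  if (fd < 0) {
    return -1;
  }
  struct stat st;
  if (::fstat(fd, &st) != 0 || !S_ISDIR(st.st_mode)) {
    ::close(fd);
    return -1;  // vanished, or not a directory after all
  }
  return fd;    // caller owns a descriptor for the verified directory
}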
+// Check if the given directory file descriptor is considered a secure +// directory for the backing store files. Returns true if the directory +// exists and is considered a secure location. Returns false if the path +// is a symbolic link or if an error occurred. +static bool is_dirfd_secure(int dir_fd) { + struct stat statbuf; + int result = 0; + + RESTARTABLE(::fstat(dir_fd, &statbuf), result); + if (result == OS_ERR) { return false; } - else { - // we have an existing directory, check if the permissions are safe. - // - if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) { - // the directory is open for writing and could be subjected - // to a symlnk attack. declare it insecure. - // - return false; + + // The path exists, now check its mode. + return is_statbuf_secure(&statbuf); +} + + +// Check to make sure fd1 and fd2 are referencing the same file system object. +static bool is_same_fsobject(int fd1, int fd2) { + struct stat statbuf1; + struct stat statbuf2; + int result = 0; + + RESTARTABLE(::fstat(fd1, &statbuf1), result); + if (result == OS_ERR) { + return false; + } + RESTARTABLE(::fstat(fd2, &statbuf2), result); + if (result == OS_ERR) { + return false; + } + + if ((statbuf1.st_ino == statbuf2.st_ino) && + (statbuf1.st_dev == statbuf2.st_dev)) { + return true; + } else { + return false; + } +} + +// Helper functions for open without O_NOFOLLOW which is not present on AIX 5.3/6.1. +// We use the jdk6 implementation here. +#ifndef O_NOFOLLOW +// The O_NOFOLLOW oflag doesn't exist before solaris 5.10, this is to simulate that behaviour +// was done in jdk 5/6 hotspot by Oracle this way +static int open_o_nofollow_impl(const char* path, int oflag, mode_t mode, bool use_mode) { + struct stat orig_st; + struct stat new_st; + bool create; + int error; + int fd; + + create = false; + + if (lstat(path, &orig_st) != 0) { + if (errno == ENOENT && (oflag & O_CREAT) != 0) { + // File doesn't exist, but_we want to create it, add O_EXCL flag + // to make sure no-one creates it (or a symlink) before us + // This works as we expect with symlinks, from posix man page: + // 'If O_EXCL and O_CREAT are set, and path names a symbolic + // link, open() shall fail and set errno to [EEXIST]'. + oflag |= O_EXCL; + create = true; + } else { + // File doesn't exist, and we are not creating it. + return OS_ERR; } + } else { + // Lstat success, check if existing file is a link. + if ((orig_st.st_mode & S_IFMT) == S_IFLNK) { + // File is a symlink. + errno = ELOOP; + return OS_ERR; + } + } + + if (use_mode == true) { + fd = open(path, oflag, mode); + } else { + fd = open(path, oflag); + } + + if (fd == OS_ERR) { + return fd; + } + + // Can't do inode checks on before/after if we created the file. + if (create == false) { + if (fstat(fd, &new_st) != 0) { + // Keep errno from fstat, in case close also fails. + error = errno; + ::close(fd); + errno = error; + return OS_ERR; + } + + if (orig_st.st_dev != new_st.st_dev || orig_st.st_ino != new_st.st_ino) { + // File was tampered with during race window. + ::close(fd); + errno = EEXIST; + if (PrintMiscellaneous && Verbose) { + warning("possible file tampering attempt detected when opening %s", path); + } + return OS_ERR; + } + } + + return fd; +} + +static int open_o_nofollow(const char* path, int oflag, mode_t mode) { + return open_o_nofollow_impl(path, oflag, mode, true); +} + +static int open_o_nofollow(const char* path, int oflag) { + return open_o_nofollow_impl(path, oflag, 0, false); +} +#endif + +// Open the directory of the given path and validate it. 
+// Return a DIR * of the open directory. +static DIR *open_directory_secure(const char* dirname) { + // Open the directory using open() so that it can be verified + // to be secure by calling is_dirfd_secure(), opendir() and then check + // to see if they are the same file system object. This method does not + // introduce a window of opportunity for the directory to be attacked that + // calling opendir() and is_directory_secure() does. + int result; + DIR *dirp = NULL; + + // No O_NOFOLLOW defined at buildtime, and it is not documented for open; + // so provide a workaround in this case. +#ifdef O_NOFOLLOW + RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result); +#else + // workaround (jdk6 coding) + RESTARTABLE(::open_o_nofollow(dirname, O_RDONLY), result); +#endif + + if (result == OS_ERR) { + // Directory doesn't exist or is a symlink, so there is nothing to cleanup. + if (PrintMiscellaneous && Verbose) { + if (errno == ELOOP) { + warning("directory %s is a symlink and is not secure\n", dirname); + } else { + warning("could not open directory %s: %s\n", dirname, strerror(errno)); + } + } + return dirp; + } + int fd = result; + + // Determine if the open directory is secure. + if (!is_dirfd_secure(fd)) { + // The directory is not a secure directory. + os::close(fd); + return dirp; + } + + // Open the directory. + dirp = ::opendir(dirname); + if (dirp == NULL) { + // The directory doesn't exist, close fd and return. + os::close(fd); + return dirp; + } + + // Check to make sure fd and dirp are referencing the same file system object. + if (!is_same_fsobject(fd, dirp->dd_fd)) { + // The directory is not secure. + os::close(fd); + os::closedir(dirp); + dirp = NULL; + return dirp; + } + + // Close initial open now that we know directory is secure + os::close(fd); + + return dirp; +} + +// NOTE: The code below uses fchdir(), open() and unlink() because +// fdopendir(), openat() and unlinkat() are not supported on all +// versions. Once the support for fdopendir(), openat() and unlinkat() +// is available on all supported versions the code can be changed +// to use these functions. + +// Open the directory of the given path, validate it and set the +// current working directory to it. +// Return a DIR * of the open directory and the saved cwd fd. +// +static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) { + + // Open the directory. + DIR* dirp = open_directory_secure(dirname); + if (dirp == NULL) { + // Directory doesn't exist or is insecure, so there is nothing to cleanup. + return dirp; + } + int fd = dirp->dd_fd; + + // Open a fd to the cwd and save it off. + int result; + RESTARTABLE(::open(".", O_RDONLY), result); + if (result == OS_ERR) { + *saved_cwd_fd = -1; + } else { + *saved_cwd_fd = result; + } + + // Set the current directory to dirname by using the fd of the directory. + result = fchdir(fd); + + return dirp; +} + +// Close the directory and restore the current working directory. +static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) { + + int result; + // If we have a saved cwd change back to it and close the fd. + if (saved_cwd_fd != -1) { + result = fchdir(saved_cwd_fd); + ::close(saved_cwd_fd); + } + + // Close the directory. + os::closedir(dirp); +} + +// Check if the given file descriptor is considered a secure. +static bool is_file_secure(int fd, const char *filename) { + + int result; + struct stat statbuf; + + // Determine if the file is secure. 
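To make the NOTE above concrete: once fdopendir(), openat() and unlinkat() can be assumed, the fchdir() dance and the saved-cwd bookkeeping collapse into fd-relative calls. A hedged sketch under that assumption (POSIX.1-2008 interfaces; cleanup_fd_relative is a hypothetical name):

#include <dirent.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

// Fd-relative variant of the cleanup loop: no fchdir() into the target,
// because unlinkat() names entries relative to the already-verified
// directory descriptor.
static void cleanup_fd_relative(const char* dirname) {
  int dir_fd = ::open(dirname, O_RDONLY | O_NOFOLLOW);
  if (dir_fd < 0) return;
  DIR* dirp = ::fdopendir(dir_fd);  // takes ownership of dir_fd
  if (dirp == NULL) {
    ::close(dir_fd);
    return;
  }
  struct dirent* entry;
  while ((entry = ::readdir(dirp)) != NULL) {
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;
    }
    // The pid-liveness filtering done by cleanup_sharedmem_resources()
    // would go here before removing anything.
    ::unlinkat(::dirfd(dirp), entry->d_name, 0);
  }
  ::closedir(dirp);  // also releases the underlying descriptor
}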
+ RESTARTABLE(::fstat(fd, &statbuf), result); + if (result == OS_ERR) { + if (PrintMiscellaneous && Verbose) { + warning("fstat failed on %s: %s\n", filename, strerror(errno)); + } + return false; + } + if (statbuf.st_nlink > 1) { + // A file with multiple links is not expected. + if (PrintMiscellaneous && Verbose) { + warning("file %s has multiple links\n", filename); + } + return false; } return true; } - -// return the user name for the given user id +// Return the user name for the given user id. // -// the caller is expected to free the allocated memory. -// +// The caller is expected to free the allocated memory. static char* get_user_name(uid_t uid) { struct passwd pwent; - // determine the max pwbuf size from sysconf, and hardcode + // Determine the max pwbuf size from sysconf, and hardcode // a default if this not available through sysconf. - // long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX); if (bufsize == -1) bufsize = 1024; @@ -344,7 +608,8 @@ strcat(usrdir_name, "/"); strcat(usrdir_name, dentry->d_name); - DIR* subdirp = os::opendir(usrdir_name); + // Open the user directory. + DIR* subdirp = open_directory_secure(usrdir_name); if (subdirp == NULL) { FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); @@ -464,28 +729,7 @@ } } - -// remove file -// -// this method removes the file with the given file name in the -// named directory. -// -static void remove_file(const char* dirname, const char* filename) { - - size_t nbytes = strlen(dirname) + strlen(filename) + 2; - char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); - - strcpy(path, dirname); - strcat(path, "/"); - strcat(path, filename); - - remove_file(path); - - FREE_C_HEAP_ARRAY(char, path, mtInternal); -} - - -// cleanup stale shared memory resources +// Cleanup stale shared memory resources // // This method attempts to remove all stale shared memory files in // the named user temporary directory. It scans the named directory @@ -493,32 +737,26 @@ // process id is extracted from the file name and a test is run to // determine if the process is alive. If the process is not alive, // any stale file resources are removed. -// static void cleanup_sharedmem_resources(const char* dirname) { - // open the user temp directory - DIR* dirp = os::opendir(dirname); - + int saved_cwd_fd; + // Open the directory. + DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd); if (dirp == NULL) { - // directory doesn't exist, so there is nothing to cleanup + // Directory doesn't exist or is insecure, so there is nothing to cleanup. return; } - if (!is_directory_secure(dirname)) { - // the directory is not a secure directory - return; - } - - // for each entry in the directory that matches the expected file + // For each entry in the directory that matches the expected file // name pattern, determine if the file resources are stale and if // so, remove the file resources. Note, instrumented HotSpot processes // for this user may start and/or terminate during this search and // remove or create new files in this directory. The behavior of this // loop under these conditions is dependent upon the implementation of // opendir/readdir. - // struct dirent* entry; char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); + errno = 0; while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { @@ -528,56 +766,55 @@ if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) { - // attempt to remove all unexpected files, except "." and ".." 
- remove_file(dirname, entry->d_name); + // Attempt to remove all unexpected files, except "." and "..". + unlink(entry->d_name); } errno = 0; continue; } - // we now have a file name that converts to a valid integer + // We now have a file name that converts to a valid integer // that could represent a process id . if this process id // matches the current process id or the process is not running, // then remove the stale file resources. // - // process liveness is detected by sending signal number 0 to + // Process liveness is detected by sending signal number 0 to // the process id (see kill(2)). if kill determines that the // process does not exist, then the file resources are removed. // if kill determines that that we don't have permission to // signal the process, then the file resources are assumed to // be stale and are removed because the resources for such a // process should be in a different user specific directory. - // if ((pid == os::current_process_id()) || (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) { - remove_file(dirname, entry->d_name); + unlink(entry->d_name); } errno = 0; } - os::closedir(dirp); + + // Close the directory and reset the current working directory. + close_directory_secure_cwd(dirp, saved_cwd_fd); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } -// make the user specific temporary directory. Returns true if +// Make the user specific temporary directory. Returns true if // the directory exists and is secure upon return. Returns false // if the directory exists but is either a symlink, is otherwise // insecure, or if an error occurred. -// static bool make_user_tmp_dir(const char* dirname) { - // create the directory with 0755 permissions. note that the directory + // Create the directory with 0755 permissions. note that the directory // will be owned by euid::egid, which may not be the same as uid::gid. - // if (mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) == OS_ERR) { if (errno == EEXIST) { // The directory already exists and was probably created by another // JVM instance. However, this could also be the result of a // deliberate symlink. Verify that the existing directory is safe. - // if (!is_directory_secure(dirname)) { - // directory is not secure + // Directory is not secure. if (PrintMiscellaneous && Verbose) { warning("%s directory is insecure\n", dirname); } @@ -613,19 +850,63 @@ return -1; } + int saved_cwd_fd; + // Open the directory and set the current working directory to it. + DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd); + if (dirp == NULL) { + // Directory doesn't exist or is insecure, so cannot create shared + // memory file. + return -1; + } + + // Open the filename in the current directory. + // Cannot use O_TRUNC here; truncation of an existing file has to happen + // after the is_file_secure() check below. int result; - RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result); + // No O_NOFOLLOW defined at buildtime, and it is not documented for open; + // so provide a workaround in this case. 
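The liveness rule described earlier in this hunk fits in a few lines; a hedged sketch (process_is_stale is a hypothetical name):

#include <errno.h>
#include <signal.h>
#include <sys/types.h>

// Signal 0 performs permission and existence checking without delivering
// anything. ESRCH: no such process. EPERM: the process exists but belongs
// to another user, so its files should live in a different per-user
// directory anyway; treat them as stale here too.
static bool process_is_stale(pid_t pid) {
  if (::kill(pid, 0) == 0) {
    return false;  // alive and ours
  }
  return errno == ESRCH || errno == EPERM;
}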
+#ifdef O_NOFOLLOW + RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result); +#else + // workaround function (jdk6 code) + RESTARTABLE(::open_o_nofollow(filename, O_RDWR|O_CREAT, S_IREAD|S_IWRITE), result); +#endif + if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not create file %s: %s\n", filename, strerror(errno)); + if (errno == ELOOP) { + warning("file %s is a symlink and is not secure\n", filename); + } else { + warning("could not create file %s: %s\n", filename, strerror(errno)); + } } + // Close the directory and reset the current working directory. + close_directory_secure_cwd(dirp, saved_cwd_fd); + return -1; } + // Close the directory and reset the current working directory. + close_directory_secure_cwd(dirp, saved_cwd_fd); // save the file descriptor int fd = result; + // Check to see if the file is secure. + if (!is_file_secure(fd, filename)) { + ::close(fd); + return -1; + } + + // Truncate the file to get rid of any existing data. + RESTARTABLE(::ftruncate(fd, (off_t)0), result); + if (result == OS_ERR) { + if (PrintMiscellaneous && Verbose) { + warning("could not truncate shared memory file: %s\n", strerror(errno)); + } + ::close(fd); + return -1; + } // set the file size RESTARTABLE(::ftruncate(fd, (off_t)size), result); if (result == OS_ERR) { @@ -647,7 +928,14 @@ // open the file int result; + // No O_NOFOLLOW defined at buildtime, and it is not documented for open; + // so provide a workaround in this case +#ifdef O_NOFOLLOW RESTARTABLE(::open(filename, oflags), result); +#else + RESTARTABLE(::open_o_nofollow(filename, oflags), result); +#endif + if (result == OS_ERR) { if (errno == ENOENT) { THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), @@ -661,8 +949,15 @@ THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); } } + int fd = result; - return result; + // Check to see if the file is secure. + if (!is_file_secure(fd, filename)) { + ::close(fd); + return -1; + } + + return fd; } // create a named shared memory region. returns the address of the @@ -694,13 +989,21 @@ char* dirname = get_user_tmp_dir(user_name); char* filename = get_sharedmem_filename(dirname, vmid); + // Get the short filename. + char* short_filename = strrchr(filename, '/'); + if (short_filename == NULL) { + short_filename = filename; + } else { + short_filename++; + } + // cleanup any stale shared memory files cleanup_sharedmem_resources(dirname); assert(((size > 0) && (size % os::vm_page_size() == 0)), "unexpected PerfMemory region size"); - fd = create_sharedmem_resources(dirname, filename, size); + fd = create_sharedmem_resources(dirname, short_filename, size); FREE_C_HEAP_ARRAY(char, user_name, mtInternal); FREE_C_HEAP_ARRAY(char, dirname, mtInternal); @@ -732,6 +1035,9 @@ // clear the shared memory region (void)::memset((void*) mapAddress, 0, size); + // It does not go through os api, the operation has to record from here. + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal); + return mapAddress; } @@ -806,7 +1112,7 @@ char* mapAddress; int result; int fd; - size_t size; + size_t size = 0; const char* luser = NULL; int mmap_prot; @@ -818,12 +1124,18 @@ // constructs for the file and the shared memory mapping. if (mode == PerfMemory::PERF_MODE_RO) { mmap_prot = PROT_READ; + + // No O_NOFOLLOW defined at buildtime, and it is not documented for open. 
+#ifdef O_NOFOLLOW + file_flags = O_RDONLY | O_NOFOLLOW; +#else file_flags = O_RDONLY; +#endif } else if (mode == PerfMemory::PERF_MODE_RW) { #ifdef LATER mmap_prot = PROT_READ | PROT_WRITE; - file_flags = O_RDWR; + file_flags = O_RDWR | O_NOFOLLOW; #else THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Unsupported access mode"); @@ -853,6 +1165,9 @@ // if (!is_directory_secure(dirname)) { FREE_C_HEAP_ARRAY(char, dirname, mtInternal); + if (luser != user) { + FREE_C_HEAP_ARRAY(char, luser, mtInternal); + } THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Process not found"); } @@ -897,6 +1212,9 @@ "Could not map PerfMemory"); } + // It does not go through os api, the operation has to record from here. + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal); + *addr = mapAddress; *sizep = size; --- ./hotspot/src/os/aix/vm/thread_aix.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/aix/vm/thread_aix.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,15 +26,9 @@ #ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP #define OS_AIX_VM_THREAD_AIX_INLINE_HPP -#include "runtime/atomic.hpp" -#include "runtime/prefetch.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp" -#include "atomic_aix_ppc.inline.hpp" -#include "orderAccess_aix_ppc.inline.hpp" -#include "prefetch_aix_ppc.inline.hpp" - // Contains inlined functions for class Thread and ThreadLocalStorage inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do --- ./hotspot/src/os/bsd/dtrace/libjvm_db.c Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/bsd/dtrace/libjvm_db.c Wed Feb 04 12:14:39 2015 -0800 @@ -260,6 +260,9 @@ uint64_t base; int err; + /* Clear *vmp now in case we jump to fail: */ + memset(vmp, 0, sizeof(VMStructEntry)); + err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr); CHECK_FAIL(err); err = read_pointer(J, sym_addr, &gHotSpotVMStructs); --- ./hotspot/src/os/bsd/vm/os_bsd.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/bsd/vm/os_bsd.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -48,6 +48,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" @@ -2433,23 +2434,25 @@ } // The memory is committed - MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC); return addr; } bool os::release_memory_special(char* base, size_t bytes) { - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - // detaching the SHM segment will also delete it, see reserve_memory_special() - int rslt = shmdt(base); - if (rslt == 0) { - tkr.record((address)base, bytes); - return true; + if (MemTracker::tracking_level() > NMT_minimal) { + Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + // detaching the SHM segment will also delete it, see reserve_memory_special() + int rslt = shmdt(base); + if (rslt == 0) { + tkr.record((address)base, bytes); + return true; + } else { + return false; + } } else { - tkr.discard(); - return false; + return shmdt(base) == 0; } - } size_t os::large_page_size() { --- ./hotspot/src/os/bsd/vm/os_bsd.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/bsd/vm/os_bsd.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,15 +26,9 @@ #define 
OS_BSD_VM_OS_BSD_INLINE_HPP #include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" -#ifdef TARGET_OS_ARCH_bsd_x86 -# include "orderAccess_bsd_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_zero -# include "orderAccess_bsd_zero.inline.hpp" -#endif - // System includes #include --- ./hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -962,7 +962,7 @@ (void)::memset((void*) mapAddress, 0, size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal); return mapAddress; } @@ -1127,7 +1127,7 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal); *addr = mapAddress; *sizep = size; --- ./hotspot/src/os/bsd/vm/thread_bsd.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/bsd/vm/thread_bsd.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,20 +29,8 @@ #error "This file should only be included from thread.inline.hpp" #endif -#include "runtime/atomic.hpp" -#include "runtime/prefetch.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp" -#ifdef TARGET_OS_ARCH_bsd_x86 -# include "atomic_bsd_x86.inline.hpp" -# include "orderAccess_bsd_x86.inline.hpp" -# include "prefetch_bsd_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_zero -# include "atomic_bsd_zero.inline.hpp" -# include "orderAccess_bsd_zero.inline.hpp" -# include "prefetch_bsd_zero.inline.hpp" -#endif // Contains inlined functions for class Thread and ThreadLocalStorage --- ./hotspot/src/os/linux/vm/os_linux.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/linux/vm/os_linux.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -49,6 +49,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" @@ -2243,7 +2244,7 @@ const siginfo_t* si = (const siginfo_t*)siginfo; os::Posix::print_siginfo_brief(st, si); - +#if INCLUDE_CDS if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && UseSharedSpaces) { FileMapInfo* mapinfo = FileMapInfo::current_info(); @@ -2253,6 +2254,7 @@ " possible disk/network problem."); } } +#endif st->cr(); } @@ -3500,9 +3502,12 @@ assert(is_ptr_aligned(start, alignment), "Must be"); - // os::reserve_memory_special will record this memory area. - // Need to release it here to prevent overlapping reservations. - MemTracker::record_virtual_memory_release((address)start, bytes); + if (MemTracker::tracking_level() > NMT_minimal) { + // os::reserve_memory_special will record this memory area. + // Need to release it here to prevent overlapping reservations. 
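The same tracking shape recurs in the BSD and Linux hunks: take a release tracker only when NMT is active, perform the operation, and record only on success. A self-contained analog with a toy tracker type (ToyTracker and do_release are hypothetical; the real code uses MemTracker::Tracker):

#include <cstddef>
#include <cstdio>

// Toy stand-in for the NMT release tracker used in the hunks above.
struct ToyTracker {
  void record(void* base, size_t bytes) {
    std::printf("recorded release of %zu bytes at %p\n", bytes, base);
  }
};

// Shape of the pattern: no tracker object at all on the untracked fast
// path; on the tracked path, record only if the release succeeded.
static bool release_with_optional_tracking(bool tracking_active,
                                           void* base, size_t bytes,
                                           bool (*do_release)(void*, size_t)) {
  if (tracking_active) {
    ToyTracker tkr;
    bool res = do_release(base, bytes);
    if (res) {
      tkr.record(base, bytes);
    }
    return res;
  }
  return do_release(base, bytes);
}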
+ Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + tkr.record((address)start, bytes); + } char* end = start + bytes; @@ -3597,7 +3602,7 @@ } // The memory is committed - MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC); } return addr; @@ -3613,24 +3618,30 @@ } bool os::release_memory_special(char* base, size_t bytes) { + bool res; + if (MemTracker::tracking_level() > NMT_minimal) { + Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + res = os::Linux::release_memory_special_impl(base, bytes); + if (res) { + tkr.record((address)base, bytes); + } + + } else { + res = os::Linux::release_memory_special_impl(base, bytes); + } + return res; +} + +bool os::Linux::release_memory_special_impl(char* base, size_t bytes) { assert(UseLargePages, "only for large pages"); - - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - bool res; + if (UseSHM) { res = os::Linux::release_memory_special_shm(base, bytes); } else { assert(UseHugeTLBFS, "must be"); res = os::Linux::release_memory_special_huge_tlbfs(base, bytes); } - - if (res) { - tkr.record((address)base, bytes); - } else { - tkr.discard(); - } - return res; } --- ./hotspot/src/os/linux/vm/os_linux.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/linux/vm/os_linux.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -108,6 +108,7 @@ static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec); static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec); + static bool release_memory_special_impl(char* base, size_t bytes); static bool release_memory_special_shm(char* base, size_t bytes); static bool release_memory_special_huge_tlbfs(char* base, size_t bytes); --- ./hotspot/src/os/linux/vm/os_linux.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/linux/vm/os_linux.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,24 +26,9 @@ #define OS_LINUX_VM_OS_LINUX_INLINE_HPP #include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" -#ifdef TARGET_OS_ARCH_linux_x86 -# include "orderAccess_linux_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_sparc -# include "orderAccess_linux_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_zero -# include "orderAccess_linux_zero.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_arm -# include "orderAccess_linux_arm.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_ppc -# include "orderAccess_linux_ppc.inline.hpp" -#endif - // System includes #include --- ./hotspot/src/os/linux/vm/perfMemory_linux.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/linux/vm/perfMemory_linux.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -971,7 +971,7 @@ (void)::memset((void*) mapAddress, 0, size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal); return mapAddress; } @@ -1142,7 +1142,7 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal); *addr = mapAddress; *sizep = size; --- 
./hotspot/src/os/linux/vm/thread_linux.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/linux/vm/thread_linux.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,35 +29,8 @@ #error "This file should only be included from thread.inline.hpp" #endif -#include "runtime/atomic.hpp" -#include "runtime/prefetch.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp" -#ifdef TARGET_OS_ARCH_linux_x86 -# include "atomic_linux_x86.inline.hpp" -# include "orderAccess_linux_x86.inline.hpp" -# include "prefetch_linux_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_sparc -# include "atomic_linux_sparc.inline.hpp" -# include "orderAccess_linux_sparc.inline.hpp" -# include "prefetch_linux_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_zero -# include "atomic_linux_zero.inline.hpp" -# include "orderAccess_linux_zero.inline.hpp" -# include "prefetch_linux_zero.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_arm -# include "atomic_linux_arm.inline.hpp" -# include "orderAccess_linux_arm.inline.hpp" -# include "prefetch_linux_arm.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_ppc -# include "atomic_linux_ppc.inline.hpp" -# include "orderAccess_linux_ppc.inline.hpp" -# include "prefetch_linux_ppc.inline.hpp" -#endif // Contains inlined functions for class Thread and ThreadLocalStorage --- ./hotspot/src/os/posix/vm/os_posix.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/posix/vm/os_posix.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -74,21 +74,41 @@ VMError::report_coredump_status(buffer, success); } -address os::get_caller_pc(int n) { +int os::get_native_stack(address* stack, int frames, int toSkip) { #ifdef _NMT_NOINLINE_ - n ++; + toSkip++; #endif + + int frame_idx = 0; + int num_of_frames; // number of frames captured frame fr = os::current_frame(); - while (n > 0 && fr.pc() && - !os::is_first_C_frame(&fr) && fr.sender_pc()) { - fr = os::get_sender_for_C_frame(&fr); - n --; + while (fr.pc() && frame_idx < frames) { + if (toSkip > 0) { + toSkip --; + } else { + stack[frame_idx ++] = fr.pc(); + } + if (fr.fp() == NULL || os::is_first_C_frame(&fr) + ||fr.sender_pc() == NULL || fr.cb() != NULL) break; + + if (fr.sender_pc() && !os::is_first_C_frame(&fr)) { + fr = os::get_sender_for_C_frame(&fr); + } else { + break; + } } - if (n == 0) { - return fr.pc(); - } else { - return NULL; + num_of_frames = frame_idx; + for (; frame_idx < frames; frame_idx ++) { + stack[frame_idx] = NULL; } + + return num_of_frames; +} + + +bool os::unsetenv(const char* name) { + assert(name != NULL, "Null pointer"); + return (::unsetenv(name) == 0); } int os::get_last_error() { --- ./hotspot/src/os/solaris/add_gnu_debuglink/add_gnu_debuglink.c Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,285 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Name: add_gnu_debuglink.c - * - * Description: Add a ".gnu_debuglink" section that refers to the specified - * debug_info_path to the specified ELF object. - * - * This program is adapted from the example program shown on the - * elf(3elf) man page and from code from the Solaris compiler - * driver. - */ - -/* - * needed to define SHF_EXCLUDE - */ -#define ELF_TARGET_ALL - -#include -#include -#include -#include -#include -#include - -static void failure(void); -static unsigned int gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, - size_t len); - -void -main(int argc, char ** argv) { - /* new ELF section name */ - static char SEC_NAME[] = ".gnu_debuglink"; - - unsigned char buffer[8 * 1024]; /* I/O buffer */ - int buffer_len; /* buffer length */ - char * debug_info_path; /* debug info path */ - void * ehdr; /* ELF header */ - Elf * elf; /* ELF descriptor */ - char * elf_ident; /* ELF identity string */ - char * elf_obj; /* elf_obj file */ - int fd; /* descriptor for files */ - unsigned int file_crc = 0; /* CRC for debug info file */ - int is_elfclass64; /* is an ELFCLASS64 file? */ - Elf_Data * link_dat; /* ELF data for new debug info link */ - Elf_Data * name_dat; /* ELF data for new section name */ - Elf_Scn * new_scn; /* new ELF section descriptor */ - void * new_shdr; /* new ELF section header */ - Elf_Scn * scn; /* ELF section descriptor */ - void * shdr; /* ELF section header */ - - if (argc != 3) { - (void) fprintf(stderr, "Usage: %s debug_info_path elf_obj\n", argv[0]); - exit(2); - } - - debug_info_path = argv[1]; /* save for later */ - if ((fd = open(debug_info_path, O_RDONLY)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", debug_info_path); - exit(3); - } - - (void) printf("Computing CRC for '%s'\n", debug_info_path); - (void) fflush(stdout); - /* compute CRC for the debug info file */ - for (;;) { - int len = read(fd, buffer, sizeof buffer); - if (len <= 0) { - break; - } - file_crc = gnu_debuglink_crc32(file_crc, buffer, len); - } - (void) close(fd); - - /* open the elf_obj */ - elf_obj = argv[2]; - if ((fd = open(elf_obj, O_RDWR)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj); - exit(4); - } - - (void) printf("Opening '%s' for update\n", elf_obj); - (void) fflush(stdout); - (void) elf_version(EV_CURRENT); /* coordinate ELF versions */ - - /* obtain the ELF descriptors from the input file */ - if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) { - failure(); - } - - /* determine if ELFCLASS64 or not? 
*/ - elf_ident = elf_getident(elf, NULL); - is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64); - - /* get the ELF header */ - if (is_elfclass64) { - ehdr = elf64_getehdr(elf); - } else { - ehdr = elf32_getehdr(elf); - } - if (ehdr == NULL) { - failure(); - } - - /* get the ELF section descriptor */ - if (is_elfclass64) { - scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx); - } else { - scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx); - } - if (scn == NULL) { - failure(); - } - - /* get the section header */ - if (is_elfclass64) { - shdr = elf64_getshdr(scn); - } else { - shdr = elf32_getshdr(scn); - } - if (shdr == NULL) { - failure(); - } - - (void) printf("Adding ELF data for new section name\n"); - (void) fflush(stdout); - name_dat = elf_newdata(scn); - name_dat->d_buf = (void *) SEC_NAME; - if (is_elfclass64) { - name_dat->d_off = ((Elf64_Shdr *) shdr)->sh_size + 1; - } else { - name_dat->d_off = ((Elf32_Shdr *) shdr)->sh_size + 1; - } - name_dat->d_align = 1; - name_dat->d_size = strlen(SEC_NAME) + 1; - - new_scn = elf_newscn(elf); - - if (is_elfclass64) { - new_shdr = elf64_getshdr(new_scn); - ((Elf64_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE; - ((Elf64_Shdr *) new_shdr)->sh_type = SHT_PROGBITS; - ((Elf64_Shdr *) new_shdr)->sh_name = ((Elf64_Shdr *) shdr)->sh_size; - ((Elf64_Shdr *) new_shdr)->sh_addralign = 1; - ((Elf64_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1); - } else { - new_shdr = elf32_getshdr(new_scn); - ((Elf32_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE; - ((Elf32_Shdr *) new_shdr)->sh_type = SHT_PROGBITS; - ((Elf32_Shdr *) new_shdr)->sh_name = ((Elf32_Shdr *) shdr)->sh_size; - ((Elf32_Shdr *) new_shdr)->sh_addralign = 1; - ((Elf32_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1); - } - - (void) printf("Adding ELF data for debug_info_path value\n"); - (void) fflush(stdout); - (void) memset(buffer, 0, sizeof buffer); - buffer_len = strlen(debug_info_path) + 1; /* +1 for NUL */ - (void) strncpy((char *) buffer, debug_info_path, buffer_len); - if (buffer_len % 4 != 0) { - /* not on a 4 byte boundary so pad to the next one */ - buffer_len += (4 - buffer_len % 4); - } - /* save the CRC */ - (void) memcpy(&buffer[buffer_len], &file_crc, sizeof file_crc); - buffer_len += sizeof file_crc; - - link_dat = elf_newdata(new_scn); - link_dat->d_type = ELF_T_BYTE; - link_dat->d_size = buffer_len; - link_dat->d_buf = buffer; - link_dat->d_align = 1; - - (void) printf("Saving updates to '%s'\n", elf_obj); - (void) fflush(stdout); - (void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */ - (void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */ - (void) elf_end(elf); /* done with ELF obj */ - (void) close(fd); - - (void) printf("Done updating '%s'\n", elf_obj); - (void) fflush(stdout); - exit(0); -} /* end main */ - - -static void -failure() { - (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno())); - exit(5); -} - - -/* - * The CRC used in gnu_debuglink, retrieved from - * http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files. 
- */ - -static unsigned int -gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, size_t len) { - static const unsigned int crc32_table[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, - 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, - 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, - 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, - 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, - 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, - 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, - 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, - 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, - 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, - 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, - 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, - 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, - 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, - 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, - 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, - 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, - 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, - 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, - 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, - 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, - 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, - 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, - 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, - 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, - 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, - 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, - 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, - 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, - 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, - 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, - 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, - 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, - 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, - 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, - 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, - 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, - 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, - 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, - 0x2d02ef8d - }; - - unsigned char *end; - - crc = ~crc & 0xffffffff; - for (end = buf + len; buf < end; ++buf) { - crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8); - } - return ~crc & 0xffffffff; -} --- 
./hotspot/src/os/solaris/dtrace/libjvm_db.c Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/solaris/dtrace/libjvm_db.c Wed Feb 04 12:14:39 2015 -0800 @@ -260,6 +260,9 @@ uint64_t base; int err; + /* Clear *vmp now in case we jump to fail: */ + memset(vmp, 0, sizeof(VMStructEntry)); + err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr); CHECK_FAIL(err); err = read_pointer(J, sym_addr, &gHotSpotVMStructs); --- ./hotspot/src/os/solaris/fix_empty_sec_hdr_flags/fix_empty_sec_hdr_flags.c Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Name: fix_empty_sec_hdr_flags.c - * - * Description: Remove the SHF_ALLOC flag from "empty" section headers. - * An "empty" section header has sh_addr == 0 and sh_size == 0. - * - * This program is adapted from the example program shown on the - * elf(3elf) man page and from code from the Solaris compiler - * driver. - */ - -#include -#include -#include -#include -#include -#include - -static void failure(void); - -void -main(int argc, char ** argv) { - void * ehdr; /* ELF header */ - unsigned int i; /* section counter */ - int fd; /* descriptor for file */ - Elf * elf; /* ELF descriptor */ - char * elf_ident; /* ELF identity string */ - char * elf_obj; /* elf_obj file */ - int fix_count; /* number of flags fixed */ - int is_elfclass64; /* is an ELFCLASS64 file? */ - Elf_Scn * scn; /* ELF section descriptor */ - void * shdr; /* ELF section header */ - Elf_Data * shstrtab; /* ELF section header string table */ - - if (argc != 2) { - (void) fprintf(stderr, "Usage: %s elf_obj\n", argv[0]); - exit(2); - } - - /* open the elf_obj */ - elf_obj = argv[1]; - if ((fd = open(elf_obj, O_RDWR)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj); - exit(3); - } - - (void) printf("Opening '%s' for update\n", elf_obj); - (void) fflush(stdout); - (void) elf_version(EV_CURRENT); /* coordinate ELF versions */ - - /* obtain the ELF descriptors from the input file */ - if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) { - failure(); - } - - /* determine if ELFCLASS64 or not? 
*/ - elf_ident = elf_getident(elf, NULL); - is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64); - - /* get the ELF header */ - if (is_elfclass64) { - ehdr = elf64_getehdr(elf); - } else { - ehdr = elf32_getehdr(elf); - } - if (ehdr == NULL) { - failure(); - } - - /* get the ELF section descriptor */ - if (is_elfclass64) { - scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx); - } else { - scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx); - } - if (scn == NULL) { - failure(); - } - - /* get the section header string table */ - shstrtab = elf_getdata(scn, NULL); - if (shstrtab == NULL) { - failure(); - } - - fix_count = 0; - - /* traverse the sections of the input file */ - for (i = 1, scn = NULL; scn = elf_nextscn(elf, scn); i++) { - int has_flag_set; /* is SHF_ALLOC flag set? */ - int is_empty; /* is section empty? */ - char * name; /* short hand pointer */ - - /* get the section header */ - if (is_elfclass64) { - shdr = elf64_getshdr(scn); - } else { - shdr = elf32_getshdr(scn); - } - if (shdr == NULL) { - failure(); - } - - if (is_elfclass64) { - name = (char *)shstrtab->d_buf + ((Elf64_Shdr *) shdr)->sh_name; - } else { - name = (char *)shstrtab->d_buf + ((Elf32_Shdr *) shdr)->sh_name; - } - - if (is_elfclass64) { - has_flag_set = ((Elf64_Shdr *) shdr)->sh_flags & SHF_ALLOC; - is_empty = ((Elf64_Shdr *) shdr)->sh_addr == 0 && - ((Elf64_Shdr *) shdr)->sh_size == 0; - } else { - has_flag_set = ((Elf32_Shdr *) shdr)->sh_flags & SHF_ALLOC; - is_empty = ((Elf32_Shdr *) shdr)->sh_addr == 0 && - ((Elf32_Shdr *) shdr)->sh_size == 0; - } - - if (is_empty && has_flag_set) { - (void) printf("section[%u] '%s' is empty, " - "but SHF_ALLOC flag is set.\n", i, name); - (void) printf("Clearing the SHF_ALLOC flag.\n"); - - if (is_elfclass64) { - ((Elf64_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC; - } else { - ((Elf32_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC; - } - fix_count++; - } - } /* end for each ELF section */ - - if (fix_count > 0) { - (void) printf("Saving %d updates to '%s'\n", fix_count, elf_obj); - (void) fflush(stdout); - (void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */ - (void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */ - } else { - (void) printf("No SHF_ALLOC flags needed to be cleared.\n"); - } - - (void) elf_end(elf); /* done with ELF obj */ - (void) close(fd); - - (void) printf("Done %s '%s'\n", - (fix_count > 0) ? 
"updating" : "with", elf_obj); - (void) fflush(stdout); - exit(0); -} /* end main */ - - -static void -failure() { - (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno())); - exit(6); -} --- ./hotspot/src/os/solaris/vm/os_solaris.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/solaris/vm/os_solaris.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -48,6 +48,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" --- ./hotspot/src/os/solaris/vm/os_solaris.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/solaris/vm/os_solaris.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,15 +26,9 @@ #define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP #include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" -#ifdef TARGET_OS_ARCH_solaris_x86 -# include "orderAccess_solaris_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_sparc -# include "orderAccess_solaris_sparc.inline.hpp" -#endif - // System includes #include #include --- ./hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -991,7 +991,8 @@ (void)::memset((void*) mapAddress, 0, size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, + size, CURRENT_PC, mtInternal); return mapAddress; } @@ -1162,7 +1163,8 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, + size, CURRENT_PC, mtInternal); *addr = mapAddress; *sizep = size; --- ./hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,20 +29,9 @@ #error "This file should only be included from thread.inline.hpp" #endif -#include "runtime/atomic.hpp" -#include "runtime/prefetch.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp" -#ifdef TARGET_OS_ARCH_solaris_x86 -# include "atomic_solaris_x86.inline.hpp" -# include "orderAccess_solaris_x86.inline.hpp" -# include "prefetch_solaris_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_sparc -# include "atomic_solaris_sparc.inline.hpp" -# include "orderAccess_solaris_sparc.inline.hpp" -# include "prefetch_solaris_sparc.inline.hpp" -#endif // Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of // startup. 
--- ./hotspot/src/os/windows/vm/os_windows.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/windows/vm/os_windows.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -51,6 +51,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" @@ -130,6 +131,7 @@ case DLL_PROCESS_DETACH: if(ForceTimeHighResolution) timeEndPeriod(1L); + break; default: break; @@ -152,6 +154,10 @@ return result > 0 && result < len; } +bool os::unsetenv(const char* name) { + assert(name != NULL, "Null pointer"); + return (SetEnvironmentVariable(name, NULL) == TRUE); +} // No setuid programs under Windows. bool os::have_special_privileges() { @@ -310,15 +316,17 @@ * So far, this method is only used by Native Memory Tracking, which is * only supported on Windows XP or later. */ -address os::get_caller_pc(int n) { + +int os::get_native_stack(address* stack, int frames, int toSkip) { #ifdef _NMT_NOINLINE_ - n ++; + toSkip ++; #endif - address pc; - if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) { - return pc; - } - return NULL; + int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, + (PVOID*)stack, NULL); + for (int index = captured; index < frames; index ++) { + stack[index] = NULL; + } + return captured; } @@ -1642,96 +1650,123 @@ void os::win32::print_windows_version(outputStream* st) { OSVERSIONINFOEX osvi; - SYSTEM_INFO si; - + VS_FIXEDFILEINFO *file_info; + TCHAR kernel32_path[MAX_PATH]; + UINT len, ret; + + // Use the GetVersionEx information to see if we're on a server or + // workstation edition of Windows. Starting with Windows 8.1 we can't + // trust the OS version information returned by this API. ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); - if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { - st->print_cr("N/A"); + st->print_cr("Call to GetVersionEx failed"); return; } - - int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion; - + bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); + + // Get the full path to \Windows\System32\kernel32.dll and use that for + // determining what version of Windows we're running on. 
+ len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; + ret = GetSystemDirectory(kernel32_path, len); + if (ret == 0 || ret > len) { + st->print_cr("Call to GetSystemDirectory failed"); + return; + } + strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); + + DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); + if (version_size == 0) { + st->print_cr("Call to GetFileVersionInfoSize failed"); + return; + } + + LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); + if (version_info == NULL) { + st->print_cr("Failed to allocate version_info"); + return; + } + + if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { + os::free(version_info); + st->print_cr("Call to GetFileVersionInfo failed"); + return; + } + + if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { + os::free(version_info); + st->print_cr("Call to VerQueryValue failed"); + return; + } + + int major_version = HIWORD(file_info->dwProductVersionMS); + int minor_version = LOWORD(file_info->dwProductVersionMS); + int build_number = HIWORD(file_info->dwProductVersionLS); + int build_minor = LOWORD(file_info->dwProductVersionLS); + int os_vers = major_version * 1000 + minor_version; + os::free(version_info); + + st->print(" Windows "); + switch (os_vers) { + + case 6000: + if (is_workstation) { + st->print("Vista"); + } else { + st->print("Server 2008"); + } + break; + + case 6001: + if (is_workstation) { + st->print("7"); + } else { + st->print("Server 2008 R2"); + } + break; + + case 6002: + if (is_workstation) { + st->print("8"); + } else { + st->print("Server 2012"); + } + break; + + case 6003: + if (is_workstation) { + st->print("8.1"); + } else { + st->print("Server 2012 R2"); + } + break; + + case 6004: + if (is_workstation) { + st->print("10"); + } else { + // The server version name of Windows 10 is not known at this time + st->print("%d.%d", major_version, minor_version); + } + break; + + default: + // Unrecognized windows, print out its major and minor versions + st->print("%d.%d", major_version, minor_version); + break; + } + + // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could + // find out whether we are running on 64 bit processor or not + SYSTEM_INFO si; ZeroMemory(&si, sizeof(SYSTEM_INFO)); - if (os_vers >= 5002) { - // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could - // find out whether we are running on 64 bit processor or not. 
- if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) { - os::Kernel32Dll::GetNativeSystemInfo(&si); - } else { - GetSystemInfo(&si); - } - } - - if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) { - switch (os_vers) { - case 3051: st->print(" Windows NT 3.51"); break; - case 4000: st->print(" Windows NT 4.0"); break; - case 5000: st->print(" Windows 2000"); break; - case 5001: st->print(" Windows XP"); break; - case 5002: - if (osvi.wProductType == VER_NT_WORKSTATION && - si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { - st->print(" Windows XP x64 Edition"); - } else { - st->print(" Windows Server 2003 family"); - } - break; - - case 6000: - if (osvi.wProductType == VER_NT_WORKSTATION) { - st->print(" Windows Vista"); - } else { - st->print(" Windows Server 2008"); - } - break; - - case 6001: - if (osvi.wProductType == VER_NT_WORKSTATION) { - st->print(" Windows 7"); - } else { - st->print(" Windows Server 2008 R2"); - } - break; - - case 6002: - if (osvi.wProductType == VER_NT_WORKSTATION) { - st->print(" Windows 8"); - } else { - st->print(" Windows Server 2012"); - } - break; - - case 6003: - if (osvi.wProductType == VER_NT_WORKSTATION) { - st->print(" Windows 8.1"); - } else { - st->print(" Windows Server 2012 R2"); - } - break; - - default: // future os - // Unrecognized windows, print out its major and minor versions - st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); - } - } else { - switch (os_vers) { - case 4000: st->print(" Windows 95"); break; - case 4010: st->print(" Windows 98"); break; - case 4090: st->print(" Windows Me"); break; - default: // future windows, print out its major and minor versions - st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); - } - } - - if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { + os::Kernel32Dll::GetNativeSystemInfo(&si); + if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { st->print(" , 64 bit"); } - st->print(" Build %d", osvi.dwBuildNumber); - st->print(" %s", osvi.szCSDVersion); // service pack + st->print(" Build %d", build_number); + st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); st->cr(); } @@ -2903,7 +2938,7 @@ PAGE_READWRITE); // If reservation failed, return NULL if (p_buf == NULL) return NULL; - MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); os::release_memory(p_buf, bytes + chunk_size); // we still need to round up to a page boundary (in case we are using large pages) @@ -2969,7 +3004,7 @@ // need to create a dummy 'reserve' record to match // the release. MemTracker::record_virtual_memory_reserve((address)p_buf, - bytes_to_release, mtNone, CALLER_PC); + bytes_to_release, CALLER_PC); os::release_memory(p_buf, bytes_to_release); } #ifdef ASSERT @@ -2988,11 +3023,10 @@ } // Although the memory is allocated individually, it is returned as one. // NMT records it as one block. 
- address pc = CALLER_PC; if ((flags & MEM_COMMIT) != 0) { - MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc); + MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); } else { - MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc); + MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); } // made it this far, success @@ -3190,8 +3224,7 @@ DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); if (res != NULL) { - address pc = CALLER_PC; - MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc); + MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); } return res; @@ -5344,11 +5377,6 @@ return ::Module32Next(hSnapshot, lpme); } - -inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { - return true; -} - inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { ::GetNativeSystemInfo(lpSystemInfo); } --- ./hotspot/src/os/windows/vm/os_windows.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/windows/vm/os_windows.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -192,7 +192,6 @@ static BOOL Module32First(HANDLE,LPMODULEENTRY32); static BOOL Module32Next(HANDLE,LPMODULEENTRY32); - static BOOL GetNativeSystemInfoAvailable(); static void GetNativeSystemInfo(LPSYSTEM_INFO); // NUMA calls --- ./hotspot/src/os/windows/vm/os_windows.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/windows/vm/os_windows.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,12 +26,9 @@ #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP #include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" -#ifdef TARGET_OS_ARCH_windows_x86 -# include "orderAccess_windows_x86.inline.hpp" -#endif - inline const char* os::file_separator() { return "\\"; } inline const char* os::line_separator() { return "\r\n"; } inline const char* os::path_separator() { return ";"; } --- ./hotspot/src/os/windows/vm/perfMemory_windows.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/windows/vm/perfMemory_windows.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1498,7 +1498,8 @@ (void)memset(mapAddress, '\0', size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, + size, CURRENT_PC, mtInternal); return (char*) mapAddress; } @@ -1680,7 +1681,8 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, + CURRENT_PC, mtInternal); *addrp = (char*)mapAddress; @@ -1834,10 +1836,14 @@ return; } - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - remove_file_mapping(addr); - // it does not go through os api, the operation has to record from here - tkr.record((address)addr, bytes); + if (MemTracker::tracking_level() > NMT_minimal) { + // it does not go through os api, the operation has to record from here + Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + remove_file_mapping(addr); + tkr.record((address)addr, bytes); + } else { + remove_file_mapping(addr); + } } char* PerfMemory::backing_store_filename() { --- ./hotspot/src/os/windows/vm/thread_windows.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os/windows/vm/thread_windows.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,15 +29,8 @@ #error "This file should only be included from thread.inline.hpp" #endif -#include "runtime/atomic.hpp" -#include "runtime/prefetch.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp" -#ifdef TARGET_OS_ARCH_windows_x86 -# include "atomic_windows_x86.inline.hpp" -# include "orderAccess_windows_x86.inline.hpp" -# include "prefetch_windows_x86.inline.hpp" -#endif // Contains inlined functions for class Thread and ThreadLocalStorage --- ./hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,7 +26,6 @@ #ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP #define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP -#include "orderAccess_aix_ppc.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "vm_version_ppc.hpp" --- ./hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -91,8 +91,9 @@ // Frame information (pc, sp, fp) retrieved via ucontext // always looks like a C-frame according to the frame -// conventions in frame_ppc64.hpp. -address os::Aix::ucontext_get_pc(ucontext_t * uc) { +// conventions in frame_ppc.hpp. + +address os::Aix::ucontext_get_pc(const ucontext_t * uc) { return (address)uc->uc_mcontext.jmp_context.iar; } @@ -486,7 +487,7 @@ //////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Aix::min_stack_allowed = 768*K; +size_t os::Aix::min_stack_allowed = 128*K; // Aix is always in floating stack mode. The stack size for a new // thread can be set via pthread_attr_setstacksize(). @@ -499,7 +500,7 @@ // because of the strange 'fallback logic' in os::create_thread(). // Better set CompilerThreadStackSize in globals_.hpp if you want to // specify a different stack size for compiler threads! 
- size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K); + size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); return s; } --- ./hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,8 +23,8 @@ * */ -#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP -#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP +#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP +#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP static void setup_fpu() {} @@ -32,4 +32,4 @@ // Note: Currently only used in 64 bit Windows implementations static bool register_code_area(char *low, char *high) { return true; } -#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP +#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP --- ./hotspot/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,8 +23,8 @@ * */ -#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP -#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP +#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP +#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP #include "runtime/prefetch.hpp" @@ -55,4 +55,4 @@ #endif } -#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP +#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP --- ./hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,8 +23,8 @@ * */ -#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP -#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP +#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP +#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP // Processor dependent parts of ThreadLocalStorage @@ -33,4 +33,4 @@ return (Thread *) os::thread_local_storage_at(thread_index()); } -#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP +#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP --- ./hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,8 +23,8 @@ * */ -#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP -#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP +#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP +#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP private: void pd_initialize() { @@ -76,4 +76,4 @@ intptr_t* last_interpreter_fp() { return _last_interpreter_fp; } -#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP +#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP --- ./hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,7 +26,6 @@ #ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP #define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP -#include "orderAccess_linux_ppc.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "vm_version_ppc.hpp" --- ./hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -307,7 +307,7 @@ // doesn't work for us. We use: ((NativeInstruction*)pc)->is_safepoint_poll()) { if (TraceTraps) { - tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc); + tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); } stub = SharedRuntime::get_poll_stub(pc); } @@ -316,7 +316,7 @@ else if (sig == SIGTRAP && TrapBasedICMissChecks && nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) { if (TraceTraps) { - tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::get_ic_miss_stub(); } @@ -325,7 +325,7 @@ else if (sig == SIGTRAP && TrapBasedNullChecks && nativeInstruction_at(pc)->is_sigtrap_null_check()) { if (TraceTraps) { - tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -335,7 +335,7 @@ CodeCache::contains((void*) pc) && !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) { if (TraceTraps) { - tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc); + tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -345,7 +345,7 @@ else if (sig == SIGTRAP && TrapBasedRangeChecks && nativeInstruction_at(pc)->is_sigtrap_range_check()) { if (TraceTraps) { - tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -453,7 +453,7 @@ //////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Linux::min_stack_allowed = 768*K; +size_t os::Linux::min_stack_allowed = 128*K; bool os::Linux::supports_variable_stack_size() { return true; } @@ -572,7 +572,7 @@ st->cr(); intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); - st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp); + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t)); st->cr(); @@ -580,7 +580,7 @@ // point to garbage if entry point in an nmethod is corrupted. Leave // this at the end, and hope for the best. 
address pc = os::Linux::ucontext_get_pc(uc); - st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc); + st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4); st->cr(); } --- ./hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -47,4 +47,4 @@ ); } -#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP +#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP --- ./hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -118,7 +118,7 @@ *ret_sp = os::Linux::ucontext_get_sp(uc); } if (ret_fp) { - *ret_fp = os::Linux::ucontext_get_fp(uc); + *ret_fp = (intptr_t*)NULL; } } else { // construct empty ExtendedPC for return value checking @@ -136,18 +136,15 @@ frame os::fetch_frame_from_context(void* ucVoid) { intptr_t* sp; - intptr_t* fp; - ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); - return frame(sp, fp, epc.pc()); + ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, NULL); + return frame(sp, frame::unpatchable, epc.pc()); } frame os::get_sender_for_C_frame(frame* fr) { - return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); + return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc()); } frame os::current_frame() { - fprintf(stderr, "current_frame()"); - intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()(); frame myframe(sp, frame::unpatchable, CAST_FROM_FN_PTR(address, os::current_frame)); --- ./hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -55,7 +55,7 @@ if (detect_niagara()) { NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");) - features = niagara1_m; + features = niagara1_m | T_family_m; } return features; --- ./hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,216 @@ #include "runtime/os.hpp" #include "vm_version_sparc.hpp" -# include <sys/auxv.h> -# include <sys/auxv_SPARC.h> -# include <sys/systeminfo.h> -# include <kstat.h> +#include <sys/auxv.h> +#include <sys/auxv_SPARC.h> +#include <sys/systeminfo.h> +#include <kstat.h> +#include <picl.h> +#include <dlfcn.h> +#include <link.h> + +extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result); +extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result); + +// Functions from the library we need (signatures should match those in picl.h) +extern "C" { + typedef int (*picl_initialize_func_t)(void); + typedef int (*picl_shutdown_func_t)(void); + typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle); + typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth, + const char *classname, void *c_args, + int (*callback_fn)(picl_nodehdl_t hdl, void *args)); + typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm, + picl_prophdl_t *ph); + typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz); + typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi); +} + +class PICL { + // Pointers to functions in the library + picl_initialize_func_t _picl_initialize; + picl_shutdown_func_t _picl_shutdown; + picl_get_root_func_t _picl_get_root; + picl_walk_tree_by_class_func_t _picl_walk_tree_by_class; + picl_get_prop_by_name_func_t _picl_get_prop_by_name; + picl_get_propval_func_t _picl_get_propval; + picl_get_propinfo_func_t _picl_get_propinfo; + // Handle to the library that is returned by dlopen + void *_dl_handle; + + bool open_library(); + void close_library(); + + template <typename FuncType> bool bind(FuncType& func, const char* name); + bool bind_library_functions(); + + // Get a value of the integer property. The value in the tree can be either 32 or 64 bit + // depending on the platform. The result is converted to int. + int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) { + picl_propinfo_t pinfo; + picl_prophdl_t proph; + if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS || + _picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) { + return PICL_FAILURE; + } + + if (pinfo.type != PICL_PTYPE_INT && pinfo.type != PICL_PTYPE_UNSIGNED_INT) { + assert(false, "Invalid property type"); + return PICL_FAILURE; + } + if (pinfo.size == sizeof(int64_t)) { + int64_t val; + if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) { + return PICL_FAILURE; + } + *result = static_cast<int>(val); + } else if (pinfo.size == sizeof(int32_t)) { + int32_t val; + if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) { + return PICL_FAILURE; + } + *result = static_cast<int>(val); + } else { + assert(false, "Unexpected integer property size"); + return PICL_FAILURE; + } + return PICL_SUCCESS; + } + + // Visitor and a state machine that visits integer properties and verifies that the + // values are the same. Stores the unique value observed.
+ class UniqueValueVisitor { + PICL *_picl; + enum { + INITIAL, // Start state, no assignments happened + ASSIGNED, // Assigned a value + INCONSISTENT // Inconsistent value seen + } _state; + int _value; + public: + UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { } + int value() { + assert(_state == ASSIGNED, "Precondition"); + return _value; + } + void set_value(int value) { + assert(_state == INITIAL, "Precondition"); + _value = value; + _state = ASSIGNED; + } + bool is_initial() { return _state == INITIAL; } + bool is_assigned() { return _state == ASSIGNED; } + bool is_inconsistent() { return _state == INCONSISTENT; } + void set_inconsistent() { _state = INCONSISTENT; } + + static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) { + UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg); + PICL* picl = state->_picl; + assert(!state->is_inconsistent(), "Precondition"); + int curr; + if (picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) { + if (!state->is_assigned()) { // first iteration + state->set_value(curr); + } else if (curr != state->value()) { // following iterations + state->set_inconsistent(); + } + } + if (state->is_inconsistent()) { + return PICL_WALK_TERMINATE; + } + return PICL_WALK_CONTINUE; + } + }; + + int _L1_data_cache_line_size; + int _L2_cache_line_size; +public: + static int get_l1_data_cache_line_size(picl_nodehdl_t nodeh, void *state) { + return UniqueValueVisitor::visit(nodeh, "l1-dcache-line-size", state); + } + static int get_l2_cache_line_size(picl_nodehdl_t nodeh, void *state) { + return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state); + } + + PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) { + if (!open_library()) { + return; + } + if (_picl_initialize() == PICL_SUCCESS) { + picl_nodehdl_t rooth; + if (_picl_get_root(&rooth) == PICL_SUCCESS) { + UniqueValueVisitor L1_state(this); + // Visit all "cpu" class instances + _picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper); + if (L1_state.is_initial()) { // Still initial, iteration found no values + // Try to walk all "core" class instances, it might be a Fujitsu machine + _picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper); + } + if (L1_state.is_assigned()) { // Is there a value?
+ _L1_data_cache_line_size = L1_state.value(); + } + + UniqueValueVisitor L2_state(this); + _picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper); + if (L2_state.is_initial()) { + _picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper); + } + if (L2_state.is_assigned()) { + _L2_cache_line_size = L2_state.value(); + } + } + _picl_shutdown(); + } + close_library(); + } + + unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; } + unsigned int L2_cache_line_size() const { return _L2_cache_line_size; } +}; + +extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) { + return PICL::get_l1_data_cache_line_size(nodeh, result); +} +extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) { + return PICL::get_l2_cache_line_size(nodeh, result); +} + +template <typename FuncType> +bool PICL::bind(FuncType& func, const char* name) { + func = reinterpret_cast<FuncType>(dlsym(_dl_handle, name)); + return func != NULL; +} + +bool PICL::bind_library_functions() { + assert(_dl_handle != NULL, "library should be open"); + return bind(_picl_initialize, "picl_initialize" ) && + bind(_picl_shutdown, "picl_shutdown" ) && + bind(_picl_get_root, "picl_get_root" ) && + bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") && + bind(_picl_get_prop_by_name, "picl_get_prop_by_name" ) && + bind(_picl_get_propval, "picl_get_propval" ) && + bind(_picl_get_propinfo, "picl_get_propinfo" ); +} + +bool PICL::open_library() { + _dl_handle = dlopen("libpicl.so.1", RTLD_LAZY); + if (_dl_handle == NULL) { + warning("PICL (libpicl.so.1) is missing. Performance will not be optimal."); + return false; + } + if (!bind_library_functions()) { + assert(false, "unexpected PICL API change"); + close_library(); + return false; + } + return true; +} + +void PICL::close_library() { + assert(_dl_handle != NULL, "library should be open"); + dlclose(_dl_handle); + _dl_handle = NULL; +} // We need to keep these here as long as we have to build on Solaris // versions before 10. @@ -137,6 +343,21 @@ #endif if (av & AV_SPARC_AES) features |= aes_instructions_m; +#ifndef AV_SPARC_SHA1 +#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */ +#endif + if (av & AV_SPARC_SHA1) features |= sha1_instruction_m; + +#ifndef AV_SPARC_SHA256 +#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */ +#endif + if (av & AV_SPARC_SHA256) features |= sha256_instruction_m; + +#ifndef AV_SPARC_SHA512 +#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */ +#endif + if (av & AV_SPARC_SHA512) features |= sha512_instruction_m; + } else { // getisax(2) failed, use the old legacy code. #ifndef PRODUCT @@ -248,5 +469,9 @@ kstat_close(kc); } + // Figure out cache line sizes using PICL + PICL picl; + _L2_cache_line_size = picl.L2_cache_line_size(); + return features; } --- ./hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -635,7 +635,11 @@ #ifndef PRODUCT void os::verify_stack_alignment() { #ifdef AMD64 - assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); + // The current_stack_pointer() calls the generated get_previous_sp stub routine. + // Only enable the assert after the routine becomes available.
+ if (StubRoutines::code1() != NULL) { + assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); + } #endif } #endif --- ./hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Wed Feb 04 12:14:39 2015 -0800 @@ -406,9 +406,17 @@ } else if (scopes.peek().getCalls().size() > 2 && m == scopes.peek().last(-2).getMethod()) { scopes.push(scopes.peek().last(-2)); } else { - System.out.println(site.getMethod()); - System.out.println(m); - throw new InternalError("call site and parse don't match"); + // C1 prints multiple method tags during inlining when it narrows method being inlined. + // Example: + // ... + // + // + // + // + // + // ... + site.setMethod(m); + scopes.push(site); } } } else if (qname.equals("parse_done")) { --- ./hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Wed Feb 04 12:14:39 2015 -0800 @@ -398,7 +398,7 @@ "/export:JVM_GetThreadStateNames "+ "/export:JVM_GetThreadStateValues "+ "/export:JVM_InitAgentProperties"); - addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib"); + addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib"); addAttr(rv, "OutputFile", outDll); addAttr(rv, "SuppressStartupBanner", "true"); addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def"); --- ./hotspot/src/share/vm/asm/codeBuffer.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/asm/codeBuffer.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -133,6 +133,10 @@ // free any overflow storage delete _overflow_arena; + // Claim is that stack allocation ensures resources are cleaned up. + // This is resource clean up, let's hope that all were properly copied out. + free_strings(); + #ifdef ASSERT // Save allocation type to execute assert in ~ResourceObj() // which is called after this destructor. @@ -268,7 +272,7 @@ GrowableArray<int>* CodeBuffer::create_patch_overflow() { if (_overflow_arena == NULL) { - _overflow_arena = new (mtCode) Arena(); + _overflow_arena = new (mtCode) Arena(mtCode); } return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0); } @@ -704,7 +708,7 @@ relocate_code_to(&dest); // transfer strings and comments from buffer to blob - dest_blob->set_strings(_strings); + dest_blob->set_strings(_code_strings); // Done moving code bytes; were they the right size? assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity"); @@ -1003,11 +1007,11 @@ void CodeBuffer::block_comment(intptr_t offset, const char * comment) { - _strings.add_comment(offset, comment); + _code_strings.add_comment(offset, comment); } const char* CodeBuffer::code_string(const char* str) { - return _strings.add_string(str); + return _code_strings.add_string(str); } class CodeString: public CHeapObj<mtCode> { @@ -1073,6 +1077,7 @@ } void CodeStrings::add_comment(intptr_t offset, const char * comment) { + check_valid(); CodeString* c = new CodeString(comment, offset); CodeString* inspos = (_strings == NULL) ?
NULL : find_last(offset); @@ -1088,11 +1093,32 @@ } void CodeStrings::assign(CodeStrings& other) { + other.check_valid(); + // Cannot do following because CodeStrings constructor is not always run! + assert(is_null(), "Cannot assign onto non-empty CodeStrings"); _strings = other._strings; + other.set_null_and_invalidate(); +} + +// Deep copy of CodeStrings for consistent memory management. +// Only used for actual disassembly so this is cheaper than reference counting +// for the "normal" fastdebug case. +void CodeStrings::copy(CodeStrings& other) { + other.check_valid(); + check_valid(); + assert(is_null(), "Cannot copy onto non-empty CodeStrings"); + CodeString* n = other._strings; + CodeString** ps = &_strings; + while (n != NULL) { + *ps = new CodeString(n->string(),n->offset()); + ps = &((*ps)->_next); + n = n->next(); + } } void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const { - if (_strings != NULL) { + check_valid(); + if (_strings != NULL) { CodeString* c = find(offset); while (c && c->offset() == offset) { stream->bol(); @@ -1103,7 +1129,7 @@ } } - +// Also sets isNull() void CodeStrings::free() { CodeString* n = _strings; while (n) { @@ -1113,10 +1139,11 @@ delete n; n = p; } - _strings = NULL; + set_null_and_invalidate(); } const char* CodeStrings::add_string(const char * string) { + check_valid(); CodeString* s = new CodeString(string); s->set_next(_strings); _strings = s; --- ./hotspot/src/share/vm/asm/codeBuffer.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/asm/codeBuffer.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,6 +27,7 @@ #include "code/oopRecorder.hpp" #include "code/relocInfo.hpp" +#include "utilities/debug.hpp" class CodeStrings; class PhaseCFG; @@ -245,15 +246,39 @@ private: #ifndef PRODUCT CodeString* _strings; +#ifdef ASSERT + // Becomes true after copy-out, forbids further use. + bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env +#endif #endif CodeString* find(intptr_t offset) const; CodeString* find_last(intptr_t offset) const; + void set_null_and_invalidate() { +#ifndef PRODUCT + _strings = NULL; +#ifdef ASSERT + _defunct = true; +#endif +#endif + } + public: CodeStrings() { #ifndef PRODUCT _strings = NULL; +#ifdef ASSERT + _defunct = false; +#endif +#endif + } + + bool is_null() { +#ifdef ASSERT + return _strings == NULL; +#else + return true; #endif } @@ -261,8 +286,17 @@ void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN; void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN; + // MOVE strings from other to this; invalidate other. void assign(CodeStrings& other) PRODUCT_RETURN; + // COPY strings from other to this; leave other valid. void copy(CodeStrings& other) PRODUCT_RETURN; void free() PRODUCT_RETURN; + // Guarantee that _strings are used at most once; assign invalidates a buffer.
+ inline void check_valid() const { +#ifdef ASSERT + assert(!_defunct, "Use of invalid CodeStrings"); +#endif + } }; // A CodeBuffer describes a memory space into which assembly @@ -330,7 +364,7 @@ csize_t _total_size; // size in bytes of combined memory buffer OopRecorder* _oop_recorder; - CodeStrings _strings; + CodeStrings _code_strings; OopRecorder _default_oop_recorder; // override with initialize_oop_recorder Arena* _overflow_arena; @@ -531,7 +565,13 @@ void initialize_oop_recorder(OopRecorder* r); OopRecorder* oop_recorder() const { return _oop_recorder; } - CodeStrings& strings() { return _strings; } + CodeStrings& strings() { return _code_strings; } + + void free_strings() { + if (!_code_strings.is_null()) { + _code_strings.free(); // sets _strings Null as a side-effect. + } + } // Code generation void relocate(address at, RelocationHolder const& rspec, int format = 0) { --- ./hotspot/src/share/vm/asm/register.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/asm/register.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -275,4 +275,101 @@ ); } +inline void assert_different_registers( + AbstractRegister a, + AbstractRegister b, + AbstractRegister c, + AbstractRegister d, + AbstractRegister e, + AbstractRegister f, + AbstractRegister g, + AbstractRegister h, + AbstractRegister i, + AbstractRegister j +) { + assert( + a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j + && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j + && c != d && c != e && c != f && c != g && c != h && c != i && c != j + && d != e && d != f && d != g && d != h && d != i && d != j + && e != f && e != g && e != h && e != i && e != j + && f != g && f != h && f != i && f != j + && g != h && g != i && g != j + && h != i && h != j + && i != j, + err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT + ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT + ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT + ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT "", + p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j)) + ); +} + +inline void assert_different_registers( + AbstractRegister a, + AbstractRegister b, + AbstractRegister c, + AbstractRegister d, + AbstractRegister e, + AbstractRegister f, + AbstractRegister g, + AbstractRegister h, + AbstractRegister i, + AbstractRegister j, + AbstractRegister k +) { + assert( + a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j && a !=k + && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j && b !=k + && c != d && c != e && c != f && c != g && c != h && c != i && c != j && c !=k + && d != e && d != f && d != g && d != h && d != i && d != j && d !=k + && e != f && e != g && e != h && e != i && e != j && e !=k + && f != g && f != h && f != i && f != j && f !=k + && g != h && g != i && g != j && g !=k + && h != i && h != j && h !=k + && i != j && i !=k + && j !=k, + err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT + ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT + ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT + ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT "", + p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k)) + ); +} + +inline void assert_different_registers( + AbstractRegister a, + AbstractRegister b, + AbstractRegister c, + AbstractRegister d, + AbstractRegister e, + 
AbstractRegister f, + AbstractRegister g, + AbstractRegister h, + AbstractRegister i, + AbstractRegister j, + AbstractRegister k, + AbstractRegister l +) { + assert( + a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j && a !=k && a !=l + && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j && b !=k && b !=l + && c != d && c != e && c != f && c != g && c != h && c != i && c != j && c !=k && c !=l + && d != e && d != f && d != g && d != h && d != i && d != j && d !=k && d !=l + && e != f && e != g && e != h && e != i && e != j && e !=k && e !=l + && f != g && f != h && f != i && f != j && f !=k && f !=l + && g != h && g != i && g != j && g !=k && g !=l + && h != i && h != j && h !=k && h !=l + && i != j && i !=k && i !=l + && j !=k && j !=l + && k !=l, + err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT + ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT + ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT + ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT + ", l=" INTPTR_FORMAT "", + p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k), p2i(l)) + ); +} + #endif // SHARE_VM_ASM_REGISTER_HPP --- ./hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -327,7 +327,7 @@ if (t2->is_constant()) { switch (t2->tag()) { case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return; - case longTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return; + case longTag : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return; default : ShouldNotReachHere(); } } @@ -808,28 +808,41 @@ static bool match_index_and_scale(Instruction* instr, Instruction** index, - int* log2_scale, - Instruction** instr_to_unpin) { - *instr_to_unpin = NULL; - - // Skip conversion ops + int* log2_scale) { + // Skip conversion ops. This works only on 32bit because of the implicit l2i that the + // unsafe performs. +#ifndef _LP64 Convert* convert = instr->as_Convert(); - if (convert != NULL) { + if (convert != NULL && convert->op() == Bytecodes::_i2l) { + assert(convert->value()->type() == intType, "invalid input type"); instr = convert->value(); } +#endif ShiftOp* shift = instr->as_ShiftOp(); if (shift != NULL) { - if (shift->is_pinned()) { - *instr_to_unpin = shift; + if (shift->op() == Bytecodes::_lshl) { + assert(shift->x()->type() == longType, "invalid input type"); + } else { +#ifndef _LP64 + if (shift->op() == Bytecodes::_ishl) { + assert(shift->x()->type() == intType, "invalid input type"); + } else { + return false; + } +#else + return false; +#endif } + + // Constant shift value? Constant* con = shift->y()->as_Constant(); if (con == NULL) return false; // Well-known type and value? 
IntConstant* val = con->type()->as_IntConstant(); - if (val == NULL) return false; - if (shift->x()->type() != intType) return false; + assert(val != NULL, "Should be an int constant"); + *index = shift->x(); int tmp_scale = val->value(); if (tmp_scale >= 0 && tmp_scale < 4) { @@ -842,31 +855,42 @@ ArithmeticOp* arith = instr->as_ArithmeticOp(); if (arith != NULL) { - if (arith->is_pinned()) { - *instr_to_unpin = arith; + // See if either arg is a known constant + Constant* con = arith->x()->as_Constant(); + if (con != NULL) { + *index = arith->y(); + } else { + con = arith->y()->as_Constant(); + if (con == NULL) return false; + *index = arith->x(); } + long const_value; // Check for integer multiply - if (arith->op() == Bytecodes::_imul) { - // See if either arg is a known constant - Constant* con = arith->x()->as_Constant(); - if (con != NULL) { - *index = arith->y(); + if (arith->op() == Bytecodes::_lmul) { + assert((*index)->type() == longType, "invalid input type"); + LongConstant* val = con->type()->as_LongConstant(); + assert(val != NULL, "expecting a long constant"); + const_value = val->value(); + } else { +#ifndef _LP64 + if (arith->op() == Bytecodes::_imul) { + assert((*index)->type() == intType, "invalid input type"); + IntConstant* val = con->type()->as_IntConstant(); + assert(val != NULL, "expecting an int constant"); + const_value = val->value(); } else { - con = arith->y()->as_Constant(); - if (con == NULL) return false; - *index = arith->x(); + return false; } - if ((*index)->type() != intType) return false; - // Well-known type and value? - IntConstant* val = con->type()->as_IntConstant(); - if (val == NULL) return false; - switch (val->value()) { - case 1: *log2_scale = 0; return true; - case 2: *log2_scale = 1; return true; - case 4: *log2_scale = 2; return true; - case 8: *log2_scale = 3; return true; - default: return false; - } +#else + return false; +#endif + } + switch (const_value) { + case 1: *log2_scale = 0; return true; + case 2: *log2_scale = 1; return true; + case 4: *log2_scale = 2; return true; + case 8: *log2_scale = 3; return true; + default: return false; } } @@ -879,29 +903,37 @@ Instruction** base, Instruction** index, int* log2_scale) { - Instruction* instr_to_unpin = NULL; ArithmeticOp* root = x->base()->as_ArithmeticOp(); if (root == NULL) return false; // Limit ourselves to addition for now if (root->op() != Bytecodes::_ladd) return false; + + bool match_found = false; // Try to find shift or scale op - if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) { + if (match_index_and_scale(root->y(), index, log2_scale)) { *base = root->x(); - } else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) { + match_found = true; + } else if (match_index_and_scale(root->x(), index, log2_scale)) { *base = root->y(); - } else if (root->y()->as_Convert() != NULL) { + match_found = true; + } else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) { + // Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs. + // 64bit needs a real sign-extending conversion. 
Convert* convert = root->y()->as_Convert(); - if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) { + if (convert->op() == Bytecodes::_i2l) { + assert(convert->value()->type() == intType, "should be an int"); // pick base and index, setting scale at 1 *base = root->x(); *index = convert->value(); *log2_scale = 0; - } else { - return false; + match_found = true; } - } else { - // doesn't match any expected sequences - return false; + } + // The default solution + if (!match_found) { + *base = root->x(); + *index = root->y(); + *log2_scale = 0; } // If the value is pinned then it will be always be computed so --- ./hotspot/src/share/vm/c1/c1_Compiler.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_Compiler.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -47,7 +47,7 @@ void Compiler::init_c1_runtime() { BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); - Arena* arena = new (mtCompiler) Arena(); + Arena* arena = new (mtCompiler) Arena(mtCompiler); Runtime1::initialize(buffer_blob); FrameMap::initialize(); // initialize data structures --- ./hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -2061,7 +2061,7 @@ bool will_link; ciKlass* klass = stream()->get_klass(will_link); assert(klass->is_instance_klass(), "must be an instance klass"); - NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before); + NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); _memory->new_instance(new_instance); apush(append_split(new_instance)); } @@ -3960,10 +3960,15 @@ // Clear out bytecode stream scope_data()->set_stream(NULL); + CompileLog* log = compilation()->log(); + if (log != NULL) log->head("parse method='%d'", log->identify(callee)); + // Ready to resume parsing in callee (either in the same block we // were in before or in the callee's start block) iterate_all_blocks(callee_start_block == NULL); + if (log != NULL) log->done("parse"); + // If we bailed out during parsing, return immediately (this is bad news) if (bailed_out()) return false; --- ./hotspot/src/share/vm/c1/c1_Instruction.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_Instruction.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1291,16 +1291,18 @@ LEAF(NewInstance, StateSplit) private: ciInstanceKlass* _klass; + bool _is_unresolved; public: // creation - NewInstance(ciInstanceKlass* klass, ValueStack* state_before) + NewInstance(ciInstanceKlass* klass, ValueStack* state_before, bool is_unresolved) : StateSplit(instanceType, state_before) - , _klass(klass) + , _klass(klass), _is_unresolved(is_unresolved) {} // accessors ciInstanceKlass* klass() const { return _klass; } + bool is_unresolved() const { return _is_unresolved; } virtual bool needs_exception_state() const { return false; } --- ./hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -466,8 +466,11 @@ } -void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) { - if (!obj->is_loaded() || PatchALot) { +void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) { + /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation + * is active and the class hasn't yet been resolved we need to emit a patch that 
resolves + * the class. */ + if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) { assert(info != NULL, "info must be set if class is not loaded"); __ klass2reg_patch(NULL, r, info); } else { @@ -660,9 +663,18 @@ __ unlock_object(hdr, object, lock, scratch, slow_path); } - -void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) { - klass2reg_with_patching(klass_reg, klass, info); +#ifndef PRODUCT +void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) { + if (PrintNotLoaded && !new_instance->klass()->is_loaded()) { + tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci()); + } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) { + tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci()); + } +} +#endif + +void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) { + klass2reg_with_patching(klass_reg, klass, info, is_unresolved); // If klass is not loaded we do not know if the klass has finalizers: if (UseFastNewInstance && klass->is_loaded() && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) { @@ -2030,6 +2042,8 @@ } } +// Here UnsafeGetRaw may have x->base() and x->index() be int or long +// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit. void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) { LIRItem base(x->base(), this); LIRItem idx(this); @@ -2044,50 +2058,73 @@ int log2_scale = 0; if (x->has_index()) { - assert(x->index()->type()->tag() == intTag, "should not find non-int index"); log2_scale = x->log2_scale(); } assert(!x->has_index() || idx.value() == x->index(), "should match"); LIR_Opr base_op = base.result(); + LIR_Opr index_op = idx.result(); #ifndef _LP64 - if (x->base()->type()->tag() == longTag) { + if (base_op->type() == T_LONG) { base_op = new_register(T_INT); __ convert(Bytecodes::_l2i, base.result(), base_op); - } else { - assert(x->base()->type()->tag() == intTag, "must be"); } + if (x->has_index()) { + if (index_op->type() == T_LONG) { + LIR_Opr long_index_op = index_op; + if (index_op->is_constant()) { + long_index_op = new_register(T_LONG); + __ move(index_op, long_index_op); + } + index_op = new_register(T_INT); + __ convert(Bytecodes::_l2i, long_index_op, index_op); + } else { + assert(x->index()->type()->tag() == intTag, "must be"); + } + } + // At this point base and index should be all ints. + assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int"); + assert(!x->has_index() || index_op->type() == T_INT, "index should be an int"); +#else + if (x->has_index()) { + if (index_op->type() == T_INT) { + if (!index_op->is_constant()) { + index_op = new_register(T_LONG); + __ convert(Bytecodes::_i2l, idx.result(), index_op); + } + } else { + assert(index_op->type() == T_LONG, "must be"); + if (index_op->is_constant()) { + index_op = new_register(T_LONG); + __ move(idx.result(), index_op); + } + } + } + // At this point base is a long non-constant + // Index is a long register or a int constant. + // We allow the constant to stay an int because that would allow us a more compact encoding by + // embedding an immediate offset in the address expression. 
If we have a long constant, we have to + // move it into a register first. + assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant"); + assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) || + (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type"); #endif BasicType dst_type = x->basic_type(); - LIR_Opr index_op = idx.result(); LIR_Address* addr; if (index_op->is_constant()) { assert(log2_scale == 0, "must not have a scale"); + assert(index_op->type() == T_INT, "only int constants supported"); addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); } else { #ifdef X86 -#ifdef _LP64 - if (!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, index_op, tmp); - index_op = tmp; - } -#endif addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); #elif defined(ARM) addr = generate_address(base_op, index_op, log2_scale, 0, dst_type); #else if (index_op->is_illegal() || log2_scale == 0) { -#ifdef _LP64 - if (!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, index_op, tmp); - index_op = tmp; - } -#endif addr = new LIR_Address(base_op, index_op, dst_type); } else { LIR_Opr tmp = new_pointer_register(); @@ -2114,7 +2151,6 @@ BasicType type = x->basic_type(); if (x->has_index()) { - assert(x->index()->type()->tag() == intTag, "should not find non-int index"); log2_scale = x->log2_scale(); } @@ -2137,38 +2173,39 @@ set_no_result(x); LIR_Opr base_op = base.result(); + LIR_Opr index_op = idx.result(); + #ifndef _LP64 - if (x->base()->type()->tag() == longTag) { + if (base_op->type() == T_LONG) { base_op = new_register(T_INT); __ convert(Bytecodes::_l2i, base.result(), base_op); - } else { - assert(x->base()->type()->tag() == intTag, "must be"); } + if (x->has_index()) { + if (index_op->type() == T_LONG) { + index_op = new_register(T_INT); + __ convert(Bytecodes::_l2i, idx.result(), index_op); + } + } + // At this point base and index should be all ints and not constants + assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int"); + assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int"); +#else + if (x->has_index()) { + if (index_op->type() == T_INT) { + index_op = new_register(T_LONG); + __ convert(Bytecodes::_i2l, idx.result(), index_op); + } + } + // At this point base and index are long and non-constant + assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long"); + assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long"); #endif - LIR_Opr index_op = idx.result(); if (log2_scale != 0) { // temporary fix (platform dependent code without shift on Intel would be better) - index_op = new_pointer_register(); -#ifdef _LP64 - if(idx.result()->type() == T_INT) { - __ convert(Bytecodes::_i2l, idx.result(), index_op); - } else { -#endif - // TODO: ARM also allows embedded shift in the address - __ move(idx.result(), index_op); -#ifdef _LP64 - } -#endif + // TODO: ARM also allows embedded shift in the address __ shift_left(index_op, log2_scale, index_op); } -#ifdef _LP64 - else if(!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, index_op, tmp); - 
index_op = tmp; - } -#endif LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type()); __ move(value.result(), addr); --- ./hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -169,6 +169,8 @@ return this; } + void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN; + #ifdef ASSERT LIR_List* lir(const char * file, int line) const { _lir->set_file_and_line(file, line); @@ -307,7 +309,7 @@ void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes); - void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info); + void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false); // this loads the length and compares against the index void array_range_check (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info); @@ -325,7 +327,7 @@ void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info); void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no); - void new_instance (LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); + void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); // machine dependent void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); --- ./hotspot/src/share/vm/c1/c1_LinearScan.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_LinearScan.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1628,25 +1628,22 @@ Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals; Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals; - create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval); - if (has_fpu_registers()) { - create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval); -#ifdef ASSERT - } else { - // fpu register allocation is omitted because no virtual fpu registers are present - // just check this again... - create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval); - assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval"); -#endif - } - // allocate cpu registers + create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, + is_precolored_cpu_interval, is_virtual_cpu_interval); + + // allocate fpu registers + create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, + is_precolored_fpu_interval, is_virtual_fpu_interval); + + // the creation of the fpu interval lists cannot be moved down into the fpu allocation section below, + // because the cpu_lsw.walk() changes interval positions.
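// ===== Editor's note =====
// A loose sketch (not HotSpot code) of the ordering constraint stated above:
// both worklists are built up front, because the first allocation walk
// mutates the shared interval state that list construction reads.
#include <vector>

struct Interval { int position; };

// Partition by current position, loosely mimicking how the precolored and
// virtual lists are derived from interval state.
static std::vector<Interval*> select_below(std::vector<Interval>& all, int limit) {
  std::vector<Interval*> out;
  for (Interval& i : all) {
    if (i.position < limit) out.push_back(&i);
  }
  return out;
}

static void walk(std::vector<Interval*>& list) {
  for (Interval* i : list) i->position += 10;  // stand-in for position changes
}

int main() {
  std::vector<Interval> intervals = { {0}, {1}, {2}, {3} };
  // Build both lists first: building the second list only after walk() ran
  // would classify intervals against already-shifted positions.
  std::vector<Interval*> cpu_list = select_below(intervals, 2);
  std::vector<Interval*> fpu_list = select_below(intervals, 4);
  walk(cpu_list);
  walk(fpu_list);
  return 0;
}
// ===== end note =====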
+ LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals); cpu_lsw.walk(); cpu_lsw.finish_allocation(); if (has_fpu_registers()) { - // allocate fpu registers LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals); fpu_lsw.walk(); fpu_lsw.finish_allocation(); --- ./hotspot/src/share/vm/c1/c1_Runtime1.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -544,13 +544,18 @@ // normal bytecode execution. thread->clear_exception_oop_and_pc(); + Handle original_exception(thread, exception()); + continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false); // If an exception was thrown during exception dispatch, the exception oop may have changed thread->set_exception_oop(exception()); thread->set_exception_pc(pc); // the exception cache is used only by non-implicit exceptions - if (continuation != NULL) { + // Update the exception cache only when no other exception occurred + // during the computation of the compiled exception handler. + if (continuation != NULL && original_exception() == exception()) { nm->add_handler_for_exception_and_pc(exception, pc, continuation); } } @@ -1018,6 +1023,7 @@ n_copy->set_data((intx) (load_klass())); } else { assert(mirror() != NULL, "klass not set"); + // Don't need a G1 pre-barrier here since we assert above that data isn't an oop. n_copy->set_data(cast_from_oop<intx>(mirror())); } --- ./hotspot/src/share/vm/c1/c1_globals.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/c1/c1_globals.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -287,9 +287,6 @@ develop(bool, InstallMethods, true, \ "Install methods at the end of successful compilations") \ \ - product(intx, CompilationRepeat, 0, \ - "Number of times to recompile method before returning result") \ - \ develop(intx, NMethodSizeLimit, (64*K)*wordSize, \ "Maximum size of a compiled method.") \ \ --- ./hotspot/src/share/vm/ci/ciEnv.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciEnv.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -51,6 +51,7 @@ #include "runtime/init.hpp" #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/thread.inline.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 @@ -85,7 +86,8 @@ // ------------------------------------------------------------------ // ciEnv::ciEnv -ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) { +ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) + : _ciEnv_arena(mtCompiler) { VM_ENTRY_MARK; // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc. @@ -138,7 +140,7 @@ _the_min_jint_string = NULL; } -ciEnv::ciEnv(Arena* arena) { +ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) { ASSERT_IN_VM; // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc. @@ -557,7 +559,12 @@ oop obj = cpool->resolved_references()->obj_at(cache_index); if (obj != NULL) { ciObject* ciobj = get_object(obj); - return ciConstant(T_OBJECT, ciobj); + if (ciobj->is_array()) { + return ciConstant(T_ARRAY, ciobj); + } else { + assert(ciobj->is_instance(), "should be an instance"); + return ciConstant(T_OBJECT, ciobj); + } } index = cpool->object_to_cp_index(cache_index); } @@ -584,8 +591,12 @@ } } ciObject* constant = get_object(string); - assert (constant->is_instance(), "must be an instance, or not? 
"); - return ciConstant(T_OBJECT, constant); + if (constant->is_array()) { + return ciConstant(T_ARRAY, constant); + } else { + assert (constant->is_instance(), "must be an instance, or not? "); + return ciConstant(T_OBJECT, constant); + } } else if (tag.is_klass() || tag.is_unresolved_klass()) { // 4881222: allow ldc to take a class type ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor); @@ -1110,9 +1121,6 @@ // ------------------------------------------------------------------ // ciEnv::record_failure() void ciEnv::record_failure(const char* reason) { - if (log() != NULL) { - log()->elem("failure reason='%s'", reason); - } if (_failure_reason == NULL) { // Record the first failure reason. _failure_reason = reason; --- ./hotspot/src/share/vm/ci/ciEnv.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciEnv.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -184,6 +184,10 @@ } } + void ensure_metadata_alive(ciMetadata* m) { + _factory->ensure_metadata_alive(m); + } + ciInstance* get_instance(oop o) { if (o == NULL) return NULL; return get_object(o)->as_instance(); --- ./hotspot/src/share/vm/ci/ciKlass.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciKlass.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -43,6 +43,7 @@ friend class ciMethod; friend class ciMethodData; friend class ciObjArrayKlass; + friend class ciReceiverTypeData; private: ciSymbol* _name; --- ./hotspot/src/share/vm/ci/ciMethod.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciMethod.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -68,7 +68,10 @@ // ciMethod::ciMethod // // Loaded method. -ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) { +ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) : + ciMetadata(h_m()), + _holder(holder) +{ assert(h_m() != NULL, "no null method"); // These fields are always filled in in loaded methods. @@ -124,7 +127,6 @@ // generating _signature may allow GC and therefore move m. // These fields are always filled in. _name = env->get_symbol(h_m()->name()); - _holder = env->get_instance_klass(h_m()->method_holder()); ciSymbol* sig_symbol = env->get_symbol(h_m()->signature()); constantPoolHandle cpool = h_m()->constants(); _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol); @@ -1106,6 +1108,22 @@ } // ------------------------------------------------------------------ +// ciMethod::has_option_value +// +template +bool ciMethod::has_option_value(const char* option, T& value) { + check_is_loaded(); + VM_ENTRY_MARK; + methodHandle mh(THREAD, get_Method()); + return CompilerOracle::has_option_value(mh, option, value); +} +// Explicit instantiation for all OptionTypes supported. +template bool ciMethod::has_option_value(const char* option, intx& value); +template bool ciMethod::has_option_value(const char* option, uintx& value); +template bool ciMethod::has_option_value(const char* option, bool& value); +template bool ciMethod::has_option_value(const char* option, ccstr& value); + +// ------------------------------------------------------------------ // ciMethod::can_be_compiled // // Have previous compilations of this method succeeded? 
--- ./hotspot/src/share/vm/ci/ciMethod.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciMethod.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -90,7 +90,7 @@ BCEscapeAnalyzer* _bcea; #endif - ciMethod(methodHandle h_m); + ciMethod(methodHandle h_m, ciInstanceKlass* holder); ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor); Method* get_Method() const { @@ -264,6 +264,8 @@ bool should_print_assembly(); bool break_at_execute(); bool has_option(const char *option); + template <typename T> + bool has_option_value(const char* option, T& value); bool can_be_compiled(); bool can_be_osr_compiled(int entry_bci); void set_not_compilable(const char* reason = NULL); --- ./hotspot/src/share/vm/ci/ciMethodData.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciMethodData.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -170,6 +170,7 @@ Klass* k = data->as_ReceiverTypeData()->receiver(row); if (k != NULL) { ciKlass* klass = CURRENT_ENV->get_klass(k); + CURRENT_ENV->ensure_metadata_alive(klass); set_receiver(row, klass); } } @@ -191,6 +192,7 @@ void ciSpeculativeTrapData::translate_from(const ProfileData* data) { Method* m = data->as_SpeculativeTrapData()->method(); ciMethod* ci_m = CURRENT_ENV->get_method(m); + CURRENT_ENV->ensure_metadata_alive(ci_m); set_method(ci_m); } --- ./hotspot/src/share/vm/ci/ciMethodData.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciMethodData.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -70,6 +70,7 @@ Klass* v = TypeEntries::valid_klass(k); if (v != NULL) { ciKlass* klass = CURRENT_ENV->get_klass(v); + CURRENT_ENV->ensure_metadata_alive(klass); return with_status(klass, k); } return with_status(NULL, k); --- ./hotspot/src/share/vm/ci/ciObjectFactory.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciObjectFactory.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,9 @@ #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" #include "runtime/fieldType.hpp" +#if INCLUDE_ALL_GCS +# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#endif // ciObjectFactory // @@ -109,7 +112,7 @@ // This Arena is long lived and exists in the resource mark of the // compiler thread that initializes the initial ciObjectFactory which // creates the shared ciObjects that all later ciObjectFactories use. - Arena* arena = new (mtCompiler) Arena(); + Arena* arena = new (mtCompiler) Arena(mtCompiler); ciEnv initial(arena); ciEnv* env = ciEnv::current(); env->_factory->init_shared_objects(); @@ -236,7 +239,7 @@ ciObject* ciObjectFactory::get(oop key) { ASSERT_IN_VM; - assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be"); + assert(Universe::heap()->is_in_reserved(key), "must be"); NonPermObject* &bucket = find_non_perm(key); if (bucket != NULL) { @@ -257,10 +260,10 @@ } // ------------------------------------------------------------------ -// ciObjectFactory::get +// ciObjectFactory::get_metadata // -// Get the ciObject corresponding to some oop. If the ciObject has -// already been created, it is returned. Otherwise, a new ciObject +// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has +// already been created, it is returned. 
Otherwise, a new ciMetadata // is created. ciMetadata* ciObjectFactory::get_metadata(Metadata* key) { ASSERT_IN_VM; @@ -287,9 +290,9 @@ } #endif if (!is_found_at(index, key, _ci_metadata)) { - // The ciObject does not yet exist. Create it and insert it + // The ciMetadata does not yet exist. Create it and insert it // into the cache. - ciMetadata* new_object = create_new_object(key); + ciMetadata* new_object = create_new_metadata(key); init_ident_of(new_object); assert(new_object->is_metadata(), "must be"); @@ -341,15 +344,28 @@ } // ------------------------------------------------------------------ -// ciObjectFactory::create_new_object +// ciObjectFactory::create_new_metadata // -// Create a new ciObject from a Metadata*. +// Create a new ciMetadata from a Metadata*. // -// Implementation note: this functionality could be virtual behavior -// of the oop itself. For now, we explicitly marshal the object. -ciMetadata* ciObjectFactory::create_new_object(Metadata* o) { +// Implementation note: in order to keep Metadata live, an auxiliary ciObject +// is used, which points to its holder. +ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) { EXCEPTION_CONTEXT; + // Keep metadata from being unloaded by keeping its holder alive. + if (_initialized && o->is_klass()) { + Klass* holder = ((Klass*)o); + if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) { + // Though ciInstanceKlass records class loader oop, it's not enough to keep + // VM anonymous classes alive (loader == NULL). Klass holder should be used instead. + // It is enough to record a ciObject, since cached elements are never removed + // during ciObjectFactory lifetime. ciObjectFactory itself is created for + // every compilation and lives for the whole duration of the compilation. + ciObject* h = get(holder->klass_holder()); + } + } + if (o->is_klass()) { KlassHandle h_k(THREAD, (Klass*)o); Klass* k = (Klass*)o; @@ -362,18 +378,51 @@ } } else if (o->is_method()) { methodHandle h_m(THREAD, (Method*)o); - return new (arena()) ciMethod(h_m); + ciEnv *env = CURRENT_THREAD_ENV; + ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder()); + return new (arena()) ciMethod(h_m, holder); } else if (o->is_methodData()) { // Hold methodHandle alive - might not be necessary ??? methodHandle h_m(THREAD, ((MethodData*)o)->method()); return new (arena()) ciMethodData((MethodData*)o); } - // The oop is of some type not supported by the compiler interface. + // The Metadata* is of some type not supported by the compiler interface. ShouldNotReachHere(); return NULL; } +// ------------------------------------------------------------------ +// ciObjectFactory::ensure_metadata_alive +// +// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC. +// This is primarily useful for metadata which the GC treats as weak roots, +// but which must be strong roots if reachable from the current compilation. +// +void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) { + ASSERT_IN_VM; // We're handling raw oops here. 
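// ===== Editor's note =====
// A loose, non-HotSpot sketch of the idea implemented below: with G1, class
// loader oops are weak roots, so metadata picked up by the current
// compilation must have its holder reported to the collector (the real code
// passes it to G1SATBCardTableModRefBS::enqueue). All names are illustrative.
#include <unordered_set>

struct SketchMetadata { void* holder; };

// Stand-in for the SATB marking queue consumed by concurrent marking.
static std::unordered_set<void*> satb_queue;

static void keep_alive_for_compilation(SketchMetadata* m) {
  if (m->holder != nullptr) {
    satb_queue.insert(m->holder);  // treat the holder as reachable
  }
}

int main() {
  SketchMetadata m{&m};
  keep_alive_for_compilation(&m);
  return satb_queue.size() == 1 ? 0 : 1;
}
// ===== end note =====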
+ +#if INCLUDE_ALL_GCS + if (!UseG1GC) { + return; + } + Klass* metadata_owner_klass; + if (m->is_klass()) { + metadata_owner_klass = m->as_klass()->get_Klass(); + } else if (m->is_method()) { + metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder(); + } else { + fatal("Not implemented for other types of metadata"); + } + + oop metadata_holder = metadata_owner_klass->klass_holder(); + if (metadata_holder != NULL) { + G1SATBCardTableModRefBS::enqueue(metadata_holder); + } + +#endif +} + //------------------------------------------------------------------ // ciObjectFactory::get_unloaded_method // @@ -667,7 +716,7 @@ // If there is no entry in the cache corresponding to this oop, return // the null tail of the bucket into which the oop should be inserted. ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) { - assert(Universe::heap()->is_in_reserved_or_null(key), "must be"); + assert(Universe::heap()->is_in_reserved(key), "must be"); ciMetadata* klass = get_metadata(key->klass()); NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS]; for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) { --- ./hotspot/src/share/vm/ci/ciObjectFactory.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciObjectFactory.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -73,7 +73,9 @@ void insert(int index, ciMetadata* obj, GrowableArray* objects); ciObject* create_new_object(oop o); - ciMetadata* create_new_object(Metadata* o); + ciMetadata* create_new_metadata(Metadata* o); + + void ensure_metadata_alive(ciMetadata* m); static bool is_equal(NonPermObject* p, oop key) { return p->object()->get_oop() == key; --- ./hotspot/src/share/vm/ci/ciTypeFlow.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/ci/ciTypeFlow.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -35,6 +35,7 @@ #include "interpreter/bytecode.hpp" #include "interpreter/bytecodes.hpp" #include "memory/allocation.inline.hpp" +#include "opto/compile.hpp" #include "runtime/deoptimization.hpp" #include "utilities/growableArray.hpp" @@ -730,7 +731,7 @@ if (obj->is_null_object()) { push_null(); } else { - assert(obj->is_instance(), "must be java_mirror of klass"); + assert(obj->is_instance() || obj->is_array(), "must be java_mirror of klass"); push_object(obj->klass()); } } else { @@ -2646,7 +2647,7 @@ assert (!blk->has_pre_order(), ""); blk->set_next_pre_order(); - if (_next_pre_order >= MaxNodeLimit / 2) { + if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) { // Too many basic blocks. Bail out. // This can happen when try/finally constructs are nested to depth N, // and there is O(2**N) cloning of jsr bodies. See bug 4697245! --- ./hotspot/src/share/vm/classfile/classFileParser.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classFileParser.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -31,6 +31,9 @@ #include "classfile/javaClasses.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" +#if INCLUDE_CDS +#include "classfile/systemDictionaryShared.hpp" +#endif #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" #include "classfile/vmSymbols.hpp" @@ -60,6 +63,7 @@ #include "services/threadService.hpp" #include "utilities/array.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" // We generally try to create the oops directly when parsing, rather than // allocating temporary data structures and copying the bytes twice. 
A @@ -2525,7 +2529,7 @@ Array<Method*>* ClassFileParser::parse_methods(bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, - bool* has_default_methods, + bool* declares_default_methods, TRAPS) { ClassFileStream* cfs = stream(); cfs->guarantee_more(2, CHECK_NULL); // length @@ -2544,11 +2548,11 @@ if (method->is_final()) { *has_final_method = true; } - if (is_interface && !(*has_default_methods) - && !method->is_abstract() && !method->is_static() - && !method->is_private()) { - // default method - *has_default_methods = true; + // declares_default_methods: the interface declares concrete instance methods (any access flags); + // used for interface initialization and default method inheritance analysis + if (is_interface && !(*declares_default_methods) + && !method->is_abstract() && !method->is_static()) { + *declares_default_methods = true; } _methods->at_put(index, method()); } @@ -2780,19 +2784,19 @@ ClassFileStream* cfs = stream(); u1* current_start = cfs->current(); - cfs->guarantee_more(attribute_byte_length, CHECK); - - int attribute_array_length = cfs->get_u2_fast(); - - guarantee_property(_max_bootstrap_specifier_index < attribute_array_length, - "Short length on BootstrapMethods in class file %s", - CHECK); - guarantee_property(attribute_byte_length >= sizeof(u2), "Invalid BootstrapMethods attribute length %u in class file %s", attribute_byte_length, CHECK); + cfs->guarantee_more(attribute_byte_length, CHECK); + + int attribute_array_length = cfs->get_u2_fast(); + + guarantee_property(_max_bootstrap_specifier_index < attribute_array_length, + "Short length on BootstrapMethods in class file %s", + CHECK); + // The attribute contains a counted array of counted tuples of shorts, // representing bootstrap specifiers: // length*{bootstrap_method_index, argument_count*{argument_index}} @@ -3054,21 +3058,39 @@ } } -// Transfer ownership of metadata allocated to the InstanceKlass. -void ClassFileParser::apply_parsed_class_metadata( - instanceKlassHandle this_klass, - int java_fields_count, TRAPS) { - // Assign annotations if needed - if (_annotations != NULL || _type_annotations != NULL || - _fields_annotations != NULL || _fields_type_annotations != NULL) { +// Create the Annotations object that will +// hold the annotations array for the Klass. +void ClassFileParser::create_combined_annotations(TRAPS) { + if (_annotations == NULL && + _type_annotations == NULL && + _fields_annotations == NULL && + _fields_type_annotations == NULL) { + // Don't create the Annotations object unnecessarily. + return; + } + Annotations* annotations = Annotations::allocate(_loader_data, CHECK); annotations->set_class_annotations(_annotations); annotations->set_class_type_annotations(_type_annotations); annotations->set_fields_annotations(_fields_annotations); annotations->set_fields_type_annotations(_fields_type_annotations); - this_klass->set_annotations(annotations); - } - + + // This is the Annotations object that will be + // assigned to the InstanceKlass being constructed. + _combined_annotations = annotations; + + // The annotation arrays below have been transferred to + // _combined_annotations, so these fields can now be cleared. + _annotations = NULL; + _type_annotations = NULL; + _fields_annotations = NULL; + _fields_type_annotations = NULL; +} + +// Transfer ownership of metadata allocated to the InstanceKlass. 
+void ClassFileParser::apply_parsed_class_metadata( + instanceKlassHandle this_klass, + int java_fields_count, TRAPS) { _cp->set_pool_holder(this_klass()); this_klass->set_constants(_cp); this_klass->set_fields(_fields, java_fields_count); @@ -3076,6 +3098,7 @@ this_klass->set_inner_classes(_inner_classes); this_klass->set_local_interfaces(_local_interfaces); this_klass->set_transitive_interfaces(_transitive_interfaces); + this_klass->set_annotations(_combined_annotations); // Clear out these fields so they don't get deallocated by the destructor clear_class_metadata(); @@ -3687,6 +3710,7 @@ JvmtiCachedClassFileData *cached_class_file = NULL; Handle class_loader(THREAD, loader_data->class_loader()); bool has_default_methods = false; + bool declares_default_methods = false; ResourceMark rm(THREAD); ClassFileStream* cfs = stream(); @@ -3741,7 +3765,15 @@ instanceKlassHandle nullHandle; // Figure out whether we can skip format checking (matching classic VM behavior) - _need_verify = Verifier::should_verify_for(class_loader(), verify); + if (DumpSharedSpaces) { + // verify == true means it's a 'remote' class (i.e., non-boot class) + // Verification decision is based on BytecodeVerificationRemote flag + // for those classes. + _need_verify = (verify) ? BytecodeVerificationRemote : + BytecodeVerificationLocal; + } else { + _need_verify = Verifier::should_verify_for(class_loader(), verify); + } // Set the verify flag in stream cfs->set_verify(_need_verify); @@ -3760,6 +3792,18 @@ u2 minor_version = cfs->get_u2_fast(); u2 major_version = cfs->get_u2_fast(); + if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) { + ResourceMark rm; + warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s", + major_version, minor_version, name->as_C_string()); + Exceptions::fthrow( + THREAD_AND_LOCATION, + vmSymbols::java_lang_UnsupportedClassVersionError(), + "Unsupported major.minor version for dump time %u.%u", + major_version, + minor_version); + } + // Check version numbers - we check this even with verifier off if (!is_supported_version(major_version, minor_version)) { if (name == NULL) { @@ -3867,6 +3911,18 @@ if (cfs->source() != NULL) tty->print(" from %s", cfs->source()); tty->print_cr("]"); } +#if INCLUDE_CDS + if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) { + // Only dump the classes that can be stored into CDS archive + if (SystemDictionaryShared::is_sharing_possible(loader_data)) { + if (name != NULL) { + ResourceMark rm(THREAD); + classlist_file->print_cr("%s", name->as_C_string()); + classlist_file->flush(); + } + } + } +#endif u2 super_class_index = cfs->get_u2_fast(); instanceKlassHandle super_klass = parse_super_class(super_class_index, @@ -3892,13 +3948,20 @@ Array* methods = parse_methods(access_flags.is_interface(), &promoted_flags, &has_final_method, - &has_default_methods, + &declares_default_methods, CHECK_(nullHandle)); + if (declares_default_methods) { + has_default_methods = true; + } // Additional attributes ClassAnnotationCollector parsed_annotations; parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle)); + // Finalize the Annotations metadata object, + // now that all annotation arrays have been created. 
+ create_combined_annotations(CHECK_(nullHandle)); + // Make sure this is the end of class file stream guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle)); @@ -4036,6 +4099,7 @@ this_klass->set_minor_version(minor_version); this_klass->set_major_version(major_version); this_klass->set_has_default_methods(has_default_methods); + this_klass->set_declares_default_methods(declares_default_methods); if (!host_klass.is_null()) { assert (this_klass->is_anonymous(), "should be the same"); @@ -4106,8 +4170,8 @@ } // Allocate mirror and initialize static fields - java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle)); - + java_lang_Class::create_mirror(this_klass, class_loader, protection_domain, + CHECK_(nullHandle)); // Generate any default methods - default methods are interface methods // that have a default implementation. This is new with Lambda project. @@ -4129,8 +4193,12 @@ tty->print("[Loaded %s from %s]\n", this_klass->external_name(), cfs->source()); } else if (class_loader.is_null()) { - if (THREAD->is_Java_thread()) { - Klass* caller = ((JavaThread*)THREAD)->security_get_caller_class(1); + Klass* caller = + THREAD->is_Java_thread() + ? ((JavaThread*)THREAD)->security_get_caller_class(1) + : NULL; + // caller can be NULL, for example, during a JVMTI VM_Init hook + if (caller != NULL) { tty->print("[Loaded %s by instance of %s]\n", this_klass->external_name(), InstanceKlass::cast(caller)->external_name()); @@ -4194,10 +4262,27 @@ InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(), _local_interfaces, _transitive_interfaces); - MetadataFactory::free_array(_loader_data, _annotations); - MetadataFactory::free_array(_loader_data, _type_annotations); - Annotations::free_contents(_loader_data, _fields_annotations); - Annotations::free_contents(_loader_data, _fields_type_annotations); + if (_combined_annotations != NULL) { + // After all annotations arrays have been created, they are installed into the + // Annotations object that will be assigned to the InstanceKlass being created. + + // Deallocate the Annotations object and the installed annotations arrays. + _combined_annotations->deallocate_contents(_loader_data); + + // If the _combined_annotations pointer is non-NULL, + // then the other annotations fields should have been cleared. + assert(_annotations == NULL, "Should have been cleared"); + assert(_type_annotations == NULL, "Should have been cleared"); + assert(_fields_annotations == NULL, "Should have been cleared"); + assert(_fields_type_annotations == NULL, "Should have been cleared"); + } else { + // If the annotations arrays were not installed into the Annotations object, + // then they have to be deallocated explicitly. 
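// ===== Editor's note =====
// A compact sketch (illustrative names and types, not the parser's) of the
// ownership protocol described above: once the annotation arrays are
// installed into the combined object, the source fields are nulled so that
// cleanup happens exactly once, along one of two mutually exclusive paths.
struct CombinedAnnotations {
  int* class_annotations;
  int* fields_annotations;
};

struct ParserSketch {
  int* _annotations = nullptr;
  int* _fields_annotations = nullptr;
  CombinedAnnotations* _combined = nullptr;

  void create_combined() {
    if (_annotations == nullptr && _fields_annotations == nullptr) return;
    _combined = new CombinedAnnotations{_annotations, _fields_annotations};
    _annotations = nullptr;          // ownership transferred
    _fields_annotations = nullptr;
  }

  ~ParserSketch() {
    if (_combined != nullptr) {
      // The arrays were installed: free them through the combined object.
      delete[] _combined->class_annotations;
      delete[] _combined->fields_annotations;
      delete _combined;
    } else {
      // Never installed: free whatever was allocated individually.
      delete[] _annotations;
      delete[] _fields_annotations;
    }
  }
};

int main() {
  ParserSketch p;
  p._annotations = new int[4];
  p.create_combined();   // the destructor now cleans up via _combined
  return 0;
}
// ===== end note =====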
+ MetadataFactory::free_array<u1>(_loader_data, _annotations); + MetadataFactory::free_array<u1>(_loader_data, _type_annotations); + Annotations::free_contents(_loader_data, _fields_annotations); + Annotations::free_contents(_loader_data, _fields_type_annotations); + } clear_class_metadata(); --- ./hotspot/src/share/vm/classfile/classFileParser.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classFileParser.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -75,6 +75,7 @@ Array<u2>* _inner_classes; Array<Klass*>* _local_interfaces; Array<Klass*>* _transitive_interfaces; + Annotations* _combined_annotations; AnnotationArray* _annotations; AnnotationArray* _type_annotations; Array<AnnotationArray*>* _fields_annotations; @@ -86,6 +87,8 @@ void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; } void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; } + void create_combined_annotations(TRAPS); + void init_parsed_class_attributes(ClassLoaderData* loader_data) { _loader_data = loader_data; _synthetic_flag = false; @@ -110,6 +113,7 @@ _inner_classes = NULL; _local_interfaces = NULL; _transitive_interfaces = NULL; + _combined_annotations = NULL; _annotations = _type_annotations = NULL; _fields_annotations = _fields_type_annotations = NULL; } @@ -247,7 +251,7 @@ Array<Method*>* parse_methods(bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, - bool* has_default_method, + bool* declares_default_methods, TRAPS); intArray* sort_methods(Array<Method*>* methods); --- ./hotspot/src/share/vm/classfile/classFileStream.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classFileStream.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file"); } -ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) { +ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) { _buffer_start = buffer; _buffer_end = buffer + length; _current = buffer; --- ./hotspot/src/share/vm/classfile/classFileStream.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classFileStream.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -53,20 +53,20 @@ u1* _buffer_start; // Buffer bottom u1* _buffer_end; // Buffer top (one past last element) u1* _current; // Current buffer position - char* _source; // Source of stream (directory name, ZIP/JAR archive name) + const char* _source; // Source of stream (directory name, ZIP/JAR archive name) bool _need_verify; // True if verification is on for the class file void truncated_file_error(TRAPS); public: // Constructor - ClassFileStream(u1* buffer, int length, char* source); + ClassFileStream(u1* buffer, int length, const char* source); // Buffer access u1* buffer() const { return _buffer_start; } int length() const { return _buffer_end - _buffer_start; } u1* current() const { return _current; } void set_current(u1* pos) { _current = pos; } - char* source() const { return _source; } + const char* source() const { return _source; } void set_verify(bool flag) { _need_verify = flag; } void check_truncated_file(bool b, TRAPS) { --- ./hotspot/src/share/vm/classfile/classLoader.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classLoader.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,8 +26,13 @@ #include "classfile/classFileParser.hpp" #include "classfile/classFileStream.hpp" #include "classfile/classLoader.hpp" +#include "classfile/classLoaderExt.hpp" #include "classfile/classLoaderData.inline.hpp" #include "classfile/javaClasses.hpp" +#if INCLUDE_CDS +#include "classfile/sharedPathsMiscInfo.hpp" +#include "classfile/sharedClassUtil.hpp" +#endif #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" @@ -35,6 +40,7 @@ #include "interpreter/bytecodeStream.hpp" #include "interpreter/oopMapCache.hpp" #include "memory/allocation.inline.hpp" +#include "memory/filemap.hpp" #include "memory/generation.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.inline.hpp" @@ -131,8 +137,12 @@ ClassPathEntry* ClassLoader::_first_entry = NULL; ClassPathEntry* ClassLoader::_last_entry = NULL; +int ClassLoader::_num_entries = 0; PackageHashtable* ClassLoader::_package_hash_table = NULL; +#if INCLUDE_CDS +SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL; +#endif // helper routines bool string_starts_with(const char* str, const char* str_to_find) { size_t str_len = strlen(str); @@ -196,9 +206,10 @@ return false; } -ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() { - _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass); - strcpy(_dir, dir); +ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() { + char* copy = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass); + strcpy(copy, dir); + _dir = copy; } @@ -211,6 +222,14 @@ // check if file exists struct stat st; if (os::stat(path, &st) == 0) { +#if INCLUDE_CDS + if (DumpSharedSpaces) { + // We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so + // we should never find a file underneath it -- unless the user has added a new file while we are running + // the dump, in which case let's quit! 
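// ===== Editor's note =====
// A small, POSIX-flavoured sketch (illustrative only, not the HotSpot os API)
// of the dump-time invariant relied on above: directories on the archived
// class path must be empty, so any file found under one while dumping means
// the class path changed out from under the dump.
#include <dirent.h>
#include <cstring>

static bool dir_is_empty(const char* path) {
  DIR* dir = opendir(path);
  if (dir == nullptr) return true;  // treat unreadable as empty for the sketch
  bool empty = true;
  while (dirent* e = readdir(dir)) {
    // Skip the "." and ".." entries every directory contains.
    if (std::strcmp(e->d_name, ".") != 0 && std::strcmp(e->d_name, "..") != 0) {
      empty = false;
      break;
    }
  }
  closedir(dir);
  return empty;
}

int main(int argc, char** argv) {
  // Exit non-zero if a supposedly empty archived-classpath dir has content.
  return (argc > 1 && !dir_is_empty(argv[1])) ? 1 : 0;
}
// ===== end note =====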
+ ShouldNotReachHere(); + } +#endif // found file, open it int file_handle = os::open(path, 0, 0); if (file_handle != -1) { @@ -234,8 +253,9 @@ ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() { _zip = zip; - _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass); - strcpy(_zip_name, zip_name); + char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass); + strcpy(copy, zip_name); + _zip_name = copy; } ClassPathZipEntry::~ClassPathZipEntry() { @@ -245,13 +265,13 @@ FREE_C_HEAP_ARRAY(char, _zip_name, mtClass); } -ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) { - // enable call to C land +u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) { + // enable call to C land JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); // check whether zip archive contains name - jint filesize, name_len; - jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len); + jint name_len; + jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len); if (entry == NULL) return NULL; u1* buffer; char name_buf[128]; @@ -262,19 +282,33 @@ filename = NEW_RESOURCE_ARRAY(char, name_len + 1); } - // file found, get pointer to class in mmaped jar file. + // file found, get pointer to the entry in mmapped jar file. if (ReadMappedEntry == NULL || !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) { - // mmaped access not available, perhaps due to compression, + // mmapped access not available, perhaps due to compression, // read contents into resource array - buffer = NEW_RESOURCE_ARRAY(u1, filesize); + int size = (*filesize) + ((nul_terminate) ? 1 : 0); + buffer = NEW_RESOURCE_ARRAY(u1, size); if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL; } + + // return result + if (nul_terminate) { + buffer[*filesize] = 0; + } + return buffer; +} + +ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) { + jint filesize; + u1* buffer = open_entry(name, &filesize, false, CHECK_NULL); + if (buffer == NULL) { + return NULL; + } if (UsePerfData) { ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize); } - // return result - return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated + return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated } // invoke function for each entry in the zip file @@ -289,12 +323,13 @@ } } -LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() { +LazyClassPathEntry::LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() { _path = strdup(path); _st = *st; _meta_index = NULL; _resolved_entry = NULL; _has_error = false; + _throw_exception = throw_exception; } bool LazyClassPathEntry::is_jar_file() { @@ -306,7 +341,11 @@ return (ClassPathEntry*) _resolved_entry; } ClassPathEntry* new_entry = NULL; - new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL); + new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL); + if (!_throw_exception && new_entry == NULL) { + assert(!HAS_PENDING_EXCEPTION, "must be"); + return NULL; + } { ThreadCritical tc; if (_resolved_entry == NULL) { @@ -340,6 +379,23 @@ return true; } +u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) { + if (_has_error) { + return NULL; + } + ClassPathEntry* cpe = resolve_entry(THREAD); + if (cpe == NULL) { + _has_error = 
true; + return NULL; + } else if (cpe->is_jar_file()) { + return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate,THREAD); + } else { + ShouldNotReachHere(); + *filesize = 0; + return NULL; + } +} + static void print_meta_index(LazyClassPathEntry* entry, GrowableArray& meta_packages) { tty->print("[Meta index for %s=", entry->name()); @@ -350,15 +406,62 @@ tty->print_cr("]"); } +#if INCLUDE_CDS +void ClassLoader::exit_with_path_failure(const char* error, const char* message) { + assert(DumpSharedSpaces, "only called at dump time"); + tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure"); + vm_exit_during_initialization(error, message); +} +#endif -void ClassLoader::setup_meta_index() { +void ClassLoader::trace_class_path(const char* msg, const char* name) { + if (!TraceClassPaths) { + return; + } + + if (msg) { + tty->print("%s", msg); + } + if (name) { + if (strlen(name) < 256) { + tty->print("%s", name); + } else { + // For very long paths, we need to print each character separately, + // as print_cr() has a length limit + while (name[0] != '\0') { + tty->print("%c", name[0]); + name++; + } + } + } + if (msg && msg[0] == '[') { + tty->print_cr("]"); + } else { + tty->cr(); + } +} + +void ClassLoader::setup_bootstrap_meta_index() { // Set up meta index which allows us to open boot jars lazily if // class data sharing is enabled + const char* meta_index_path = Arguments::get_meta_index_path(); + const char* meta_index_dir = Arguments::get_meta_index_dir(); + setup_meta_index(meta_index_path, meta_index_dir, 0); +} + +void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) { const char* known_version = "% VERSION 2"; - char* meta_index_path = Arguments::get_meta_index_path(); - char* meta_index_dir = Arguments::get_meta_index_dir(); FILE* file = fopen(meta_index_path, "r"); int line_no = 0; +#if INCLUDE_CDS + if (DumpSharedSpaces) { + if (file != NULL) { + _shared_paths_misc_info->add_required_file(meta_index_path); + } else { + _shared_paths_misc_info->add_nonexist_path(meta_index_path); + } + } +#endif if (file != NULL) { ResourceMark rm; LazyClassPathEntry* cur_entry = NULL; @@ -393,7 +496,7 @@ // Hand off current packages to current lazy entry (if any) if ((cur_entry != NULL) && (boot_class_path_packages.length() > 0)) { - if (TraceClassLoading && Verbose) { + if ((TraceClassLoading || TraceClassPaths) && Verbose) { print_meta_index(cur_entry, boot_class_path_packages); } MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0), @@ -404,8 +507,10 @@ boot_class_path_packages.clear(); // Find lazy entry corresponding to this jar file - for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) { - if (entry->is_lazy() && + int count = 0; + for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) { + if (count >= start_index && + entry->is_lazy() && string_starts_with(entry->name(), meta_index_dir) && string_ends_with(entry->name(), &package_name[2])) { cur_entry = (LazyClassPathEntry*) entry; @@ -442,7 +547,7 @@ // Hand off current packages to current lazy entry (if any) if ((cur_entry != NULL) && (boot_class_path_packages.length() > 0)) { - if (TraceClassLoading && Verbose) { + if ((TraceClassLoading || TraceClassPaths) && Verbose) { print_meta_index(cur_entry, boot_class_path_packages); } MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0), @@ -453,36 +558,96 @@ } } +#if INCLUDE_CDS +void 
ClassLoader::check_shared_classpath(const char *path) { + if (strcmp(path, "") == 0) { + exit_with_path_failure("Cannot have empty path in archived classpaths", NULL); + } + + struct stat st; + if (os::stat(path, &st) == 0) { + if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory + if (!os::dir_is_empty(path)) { + tty->print_cr("Error: non-empty directory '%s'", path); + exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL); + } + } + } +} +#endif + void ClassLoader::setup_bootstrap_search_path() { assert(_first_entry == NULL, "should not setup bootstrap class search path twice"); - char* sys_class_path = os::strdup(Arguments::get_sysclasspath()); - if (TraceClassLoading && Verbose) { - tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path); + const char* sys_class_path = Arguments::get_sysclasspath(); + if (PrintSharedArchiveAndExit) { + // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily + // the same as the bootcp of the shared archive. + } else { + trace_class_path("[Bootstrap loader class path=", sys_class_path); } +#if INCLUDE_CDS + if (DumpSharedSpaces) { + _shared_paths_misc_info->add_boot_classpath(sys_class_path); + } +#endif + setup_search_path(sys_class_path); +} - int len = (int)strlen(sys_class_path); +#if INCLUDE_CDS +int ClassLoader::get_shared_paths_misc_info_size() { + return _shared_paths_misc_info->get_used_bytes(); +} + +void* ClassLoader::get_shared_paths_misc_info() { + return _shared_paths_misc_info->buffer(); +} + +bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) { + SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size); + bool result = checker->check(); + delete checker; + return result; +} +#endif + +void ClassLoader::setup_search_path(const char *class_path, bool canonicalize) { + int offset = 0; + int len = (int)strlen(class_path); int end = 0; // Iterate over class path entries for (int start = 0; start < len; start = end) { - while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) { + while (class_path[end] && class_path[end] != os::path_separator()[0]) { end++; } - char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass); - strncpy(path, &sys_class_path[start], end-start); - path[end-start] = '\0'; - update_class_path_entry_list(path, false); - FREE_C_HEAP_ARRAY(char, path, mtClass); - while (sys_class_path[end] == os::path_separator()[0]) { + EXCEPTION_MARK; + ResourceMark rm(THREAD); + char* path = NEW_RESOURCE_ARRAY(char, end - start + 1); + strncpy(path, &class_path[start], end - start); + path[end - start] = '\0'; + if (canonicalize) { + char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN + 1); + if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { + path = canonical_path; + } + } + update_class_path_entry_list(path, /*check_for_duplicates=*/canonicalize); +#if INCLUDE_CDS + if (DumpSharedSpaces) { + check_shared_classpath(path); + } +#endif + while (class_path[end] == os::path_separator()[0]) { end++; } } } -ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) { +ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st, + bool lazy, bool throw_exception, TRAPS) { JavaThread* thread = JavaThread::current(); if (lazy) { - return new LazyClassPathEntry(path, st); + return new LazyClassPathEntry(path, st, throw_exception); } ClassPathEntry* new_entry = NULL; if 
((st->st_mode & S_IFREG) == S_IFREG) { @@ -491,7 +656,11 @@ char canonical_path[JVM_MAXPATHLEN]; if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { // This matches the classic VM - THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL); + if (throw_exception) { + THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL); + } else { + return NULL; + } } char* error_msg = NULL; jzfile* zip; @@ -503,7 +672,7 @@ } if (zip != NULL && error_msg == NULL) { new_entry = new ClassPathZipEntry(zip, path); - if (TraceClassLoading) { + if (TraceClassLoading || TraceClassPaths) { tty->print_cr("[Opened %s]", path); } } else { @@ -517,12 +686,16 @@ msg = NEW_RESOURCE_ARRAY(char, len); ; jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path); } - THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL); + if (throw_exception) { + THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL); + } else { + return NULL; + } } } else { // Directory new_entry = new ClassPathDirEntry(path); - if (TraceClassLoading) { + if (TraceClassLoading || TraceClassPaths) { tty->print_cr("[Path %s]", path); } } @@ -537,11 +710,8 @@ struct stat st; if (os::stat(path, &st) == 0) { if ((st.st_mode & S_IFREG) == S_IFREG) { - char orig_path[JVM_MAXPATHLEN]; char canonical_path[JVM_MAXPATHLEN]; - - strcpy(orig_path, path); - if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) { + if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { char* error_msg = NULL; jzfile* zip; { @@ -583,23 +753,37 @@ _last_entry = new_entry; } } + _num_entries ++; } -void ClassLoader::update_class_path_entry_list(char *path, - bool check_for_duplicates) { +// Returns true IFF the file/dir exists and the entry was successfully created. +bool ClassLoader::update_class_path_entry_list(const char *path, + bool check_for_duplicates, + bool throw_exception) { struct stat st; if (os::stat(path, &st) == 0) { // File or directory found ClassPathEntry* new_entry = NULL; Thread* THREAD = Thread::current(); - new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK); + new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false)); + if (new_entry == NULL) { + return false; + } // The kernel VM adds dynamically to the end of the classloader path and // doesn't reorder the bootclasspath which would break java.lang.Package // (see PackageInfo). // Add new entry to linked list if (!check_for_duplicates || !contains_entry(new_entry)) { - add_to_list(new_entry); + ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry); } + return true; + } else { +#if INCLUDE_CDS + if (DumpSharedSpaces) { + _shared_paths_misc_info->add_nonexist_path(path); + } +#endif + return false; } } @@ -758,10 +942,10 @@ assert(n == number_of_entries(), "just checking"); } - void copy_table(char** top, char* end, PackageHashtable* table); + CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);) }; - +#if INCLUDE_CDS void PackageHashtable::copy_table(char** top, char* end, PackageHashtable* table) { // Copy (relocate) the table to the shared space. @@ -769,33 +953,30 @@ // Calculate the space needed for the package name strings. 
int i; - int n = 0; - for (i = 0; i < table_size(); ++i) { - for (PackageInfo* pp = table->bucket(i); - pp != NULL; - pp = pp->next()) { - n += (int)(strlen(pp->pkgname()) + 1); - } - } - if (*top + n + sizeof(intptr_t) >= end) { - report_out_of_shared_space(SharedMiscData); - } - - // Copy the table data (the strings) to the shared space. - n = align_size_up(n, sizeof(HeapWord)); - *(intptr_t*)(*top) = n; - *top += sizeof(intptr_t); + intptr_t* tableSize = (intptr_t*)(*top); + *top += sizeof(intptr_t); // For table size + char* tableStart = *top; for (i = 0; i < table_size(); ++i) { for (PackageInfo* pp = table->bucket(i); pp != NULL; pp = pp->next()) { int n1 = (int)(strlen(pp->pkgname()) + 1); + if (*top + n1 >= end) { + report_out_of_shared_space(SharedMiscData); + } pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1)); *top += n1; } } *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord)); + if (*top >= end) { + report_out_of_shared_space(SharedMiscData); + } + + // Write table size + intptr_t len = *top - (char*)tableStart; + *tableSize = len; } @@ -806,7 +987,7 @@ void ClassLoader::copy_package_info_table(char** top, char* end) { _package_hash_table->copy_table(top, end, _package_hash_table); } - +#endif PackageInfo* ClassLoader::lookup_package(const char *pkgname) { const char *cp = strrchr(pkgname, '/'); @@ -899,7 +1080,8 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) { ResourceMark rm(THREAD); - EventMark m("loading class %s", h_name->as_C_string()); + const char* class_name = h_name->as_C_string(); + EventMark m("loading class %s", class_name); ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion); stringStream st; @@ -907,18 +1089,24 @@ // st.print("%s.class", h_name->as_utf8()); st.print_raw(h_name->as_utf8()); st.print_raw(".class"); - char* name = st.as_string(); + const char* file_name = st.as_string(); + ClassLoaderExt::Context context(class_name, file_name, THREAD); // Lookup stream for parsing .class file ClassFileStream* stream = NULL; int classpath_index = 0; + ClassPathEntry* e = NULL; + instanceKlassHandle h; { PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(), ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(), PerfClassTraceTime::CLASS_LOAD); - ClassPathEntry* e = _first_entry; + e = _first_entry; while (e != NULL) { - stream = e->open_stream(name, CHECK_NULL); + stream = e->open_stream(file_name, CHECK_NULL); + if (!context.check(stream, classpath_index)) { + return h; // NULL + } if (stream != NULL) { break; } @@ -927,9 +1115,7 @@ } } - instanceKlassHandle h; if (stream != NULL) { - // class file found, parse it ClassFileParser parser(stream); ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); @@ -939,12 +1125,19 @@ loader_data, protection_domain, parsed_name, - false, - CHECK_(h)); - - // add to package table - if (add_package(name, classpath_index, THREAD)) { - h = result; + context.should_verify(classpath_index), + THREAD); + if (HAS_PENDING_EXCEPTION) { + ResourceMark rm; + if (DumpSharedSpaces) { + tty->print_cr("Preload Error: Failed to load %s", class_name); + } + return h; + } + h = context.record_result(classpath_index, e, result, THREAD); + } else { + if (DumpSharedSpaces) { + tty->print_cr("Preload Warning: Cannot find %s", class_name); } } @@ -1039,14 +1232,27 @@ // lookup zip library entry points load_zip_library(); +#if INCLUDE_CDS // initialize search path + if (DumpSharedSpaces) { + _shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info(); 
+ } +#endif setup_bootstrap_search_path(); if (LazyBootClassLoader) { // set up meta index which makes boot classpath initialization lazier - setup_meta_index(); + setup_bootstrap_meta_index(); } } +#if INCLUDE_CDS +void ClassLoader::initialize_shared_path() { + if (DumpSharedSpaces) { + ClassLoaderExt::setup_search_paths(); + _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check() + } +} +#endif jlong ClassLoader::classloader_time_ms() { return UsePerfData ? @@ -1090,11 +1296,17 @@ } -bool ClassLoader::get_canonical_path(char* orig, char* out, int len) { +bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) { assert(orig != NULL && out != NULL && len > 0, "bad arguments"); if (CanonicalizeEntry != NULL) { - JNIEnv* env = JavaThread::current()->jni_environment(); - if ((CanonicalizeEntry)(env, os::native_path(orig), out, len) < 0) { + JavaThread* THREAD = JavaThread::current(); + JNIEnv* env = THREAD->jni_environment(); + ResourceMark rm(THREAD); + + // os::native_path writes into orig_copy + char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1); + strcpy(orig_copy, orig); + if ((CanonicalizeEntry)(env, os::native_path(orig_copy), out, len) < 0) { return false; } } else { @@ -1412,7 +1624,7 @@ if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) { // Clobber the first compile and force second tier compilation nmethod* nm = m->code(); - if (nm != NULL) { + if (nm != NULL && !m->is_method_handle_intrinsic()) { // Throw out the code so that the code cache doesn't fill up nm->make_not_entrant(); m->clear_code(); @@ -1431,7 +1643,7 @@ } nmethod* nm = m->code(); - if (nm != NULL) { + if (nm != NULL && !m->is_method_handle_intrinsic()) { // Throw out the code so that the code cache doesn't fill up nm->make_not_entrant(); m->clear_code(); --- ./hotspot/src/share/vm/classfile/classLoader.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classLoader.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -72,11 +72,11 @@ class ClassPathDirEntry: public ClassPathEntry { private: - char* _dir; // Name of directory + const char* _dir; // Name of directory public: bool is_jar_file() { return false; } const char* name() { return _dir; } - ClassPathDirEntry(char* dir); + ClassPathDirEntry(const char* dir); ClassFileStream* open_stream(const char* name, TRAPS); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) @@ -100,13 +100,14 @@ class ClassPathZipEntry: public ClassPathEntry { private: - jzfile* _zip; // The zip archive - char* _zip_name; // Name of zip archive + jzfile* _zip; // The zip archive + const char* _zip_name; // Name of zip archive public: bool is_jar_file() { return true; } const char* name() { return _zip_name; } ClassPathZipEntry(jzfile* zip, const char* zip_name); ~ClassPathZipEntry(); + u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS); ClassFileStream* open_stream(const char* name, TRAPS); void contents_do(void f(const char* name, void* context), void* context); // Debugging @@ -122,16 +123,18 @@ // For lazier loading of boot class path entries class LazyClassPathEntry: public ClassPathEntry { private: - char* _path; // dir or file + const char* _path; // dir or file struct stat _st; MetaIndex* _meta_index; bool _has_error; + bool _throw_exception; volatile ClassPathEntry* _resolved_entry; + public: ClassPathEntry* resolve_entry(TRAPS); - public: bool is_jar_file(); const char* name() { return _path; } - LazyClassPathEntry(char* path, 
const struct stat* st); + LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception); + u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS); ClassFileStream* open_stream(const char* name, TRAPS); void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; } virtual bool is_lazy(); @@ -142,6 +145,7 @@ class PackageHashtable; class PackageInfo; +class SharedPathsMiscInfo; template class HashtableBucket; class ClassLoader: AllStatic { @@ -149,7 +153,7 @@ enum SomeConstants { package_hash_table_size = 31 // Number of buckets }; - private: + protected: friend class LazyClassPathEntry; // Performance counters @@ -191,10 +195,15 @@ static ClassPathEntry* _first_entry; // Last entry in linked list of ClassPathEntry instances static ClassPathEntry* _last_entry; + static int _num_entries; + // Hash table used to keep track of loaded packages static PackageHashtable* _package_hash_table; static const char* _shared_archive; + // Info used by CDS + CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;) + // Hash function static unsigned int hash(const char *s, int n); // Returns the package file name corresponding to the specified package @@ -205,20 +214,24 @@ static bool add_package(const char *pkgname, int classpath_index, TRAPS); // Initialization - static void setup_meta_index(); + static void setup_bootstrap_meta_index(); + static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir, + int start_index); static void setup_bootstrap_search_path(); + static void setup_search_path(const char *class_path, bool canonicalize=false); + static void load_zip_library(); - static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st, - bool lazy, TRAPS); + static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st, + bool lazy, bool throw_exception, TRAPS); // Canonicalizes path names, so strcmp will work properly. This is mainly // to avoid confusing the zip library - static bool get_canonical_path(char* orig, char* out, int len); + static bool get_canonical_path(const char* orig, char* out, int len); public: static int crc32(int crc, const char* buf, int len); - // Used by the kernel jvm. 
- static void update_class_path_entry_list(char *path, - bool check_for_duplicates); + static bool update_class_path_entry_list(const char *path, + bool check_for_duplicates, + bool throw_exception=true); static void print_bootclasspath(); // Timing @@ -301,6 +314,7 @@ // Initialization static void initialize(); + CDS_ONLY(static void initialize_shared_path();) static void create_package_info_table(); static void create_package_info_table(HashtableBucket *t, int length, int number_of_entries); @@ -315,10 +329,25 @@ return e; } + static int num_classpath_entries() { + return _num_entries; + } + +#if INCLUDE_CDS // Sharing dump and restore static void copy_package_info_buckets(char** top, char* end); static void copy_package_info_table(char** top, char* end); + static void check_shared_classpath(const char *path); + static void finalize_shared_paths_misc_info(); + static int get_shared_paths_misc_info_size(); + static void* get_shared_paths_misc_info(); + static bool check_shared_paths_misc_info(void* info, int size); + static void exit_with_path_failure(const char* error, const char* message); +#endif + + static void trace_class_path(const char* msg, const char* name = NULL); + // VM monitoring and management support static jlong classloader_time_ms(); static jlong class_method_total_size(); @@ -342,7 +371,7 @@ // Force compilation of all methods in all classes in bootstrap class path (stress test) #ifndef PRODUCT - private: + protected: static int _compile_the_world_class_counter; static int _compile_the_world_method_counter; public: --- ./hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -64,16 +64,19 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" - #if INCLUDE_TRACE - #include "trace/tracing.hpp" +#include "trace/tracing.hpp" #endif ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : _class_loader(h_class_loader()), - _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially + _is_anonymous(is_anonymous), + // An anonymous class loader data doesn't have anything to keep + // it from being unloaded during parsing of the anonymous class. + // The null-class-loader should always be kept alive. + _keep_alive(is_anonymous || h_class_loader.is_null()), _metaspace(NULL), _unloading(false), _klasses(NULL), _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL), _next(NULL), _dependencies(dependencies), @@ -317,12 +320,45 @@ } } +#ifdef ASSERT +class AllAliveClosure : public OopClosure { + BoolObjectClosure* _is_alive_closure; + bool _found_dead; + public: + AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {} + template void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!_is_alive_closure->do_object_b(obj)) { + _found_dead = true; + } + } + } + void do_oop(oop* p) { do_oop_work(p); } + void do_oop(narrowOop* p) { do_oop_work(p); } + bool found_dead() { return _found_dead; } +}; +#endif + +oop ClassLoaderData::keep_alive_object() const { + assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive"); + return is_anonymous() ? 
_klasses->java_mirror() : class_loader(); +} + bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const { - bool alive = - is_anonymous() ? - is_alive_closure->do_object_b(_klasses->java_mirror()) : - class_loader() == NULL || is_alive_closure->do_object_b(class_loader()); - assert(!alive || claimed(), "must be claimed"); + bool alive = keep_alive() // null class loader and incomplete anonymous klasses. + || is_alive_closure->do_object_b(keep_alive_object()); + +#ifdef ASSERT + if (alive) { + AllAliveClosure all_alive_closure(is_alive_closure); + KlassToOopClosure klass_closure(&all_alive_closure); + const_cast(this)->oops_do(&all_alive_closure, &klass_closure, false); + assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this))); + } +#endif + return alive; } @@ -601,11 +637,36 @@ void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) { if (ClassUnloading) { - ClassLoaderData::the_null_class_loader_data()->oops_do(f, klass_closure, must_claim); - // keep any special CLDs alive. - ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim); + keep_alive_oops_do(f, klass_closure, must_claim); } else { - ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim); + oops_do(f, klass_closure, must_claim); + } +} + +void ClassLoaderDataGraph::cld_do(CLDClosure* cl) { + for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) { + cl->do_cld(cld); + } +} + +void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) { + CLDClosure* closure = cld->keep_alive() ? strong : weak; + if (closure != NULL) { + closure->do_cld(cld); + } + } +} + +void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) { + roots_cld_do(cl, NULL); +} + +void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) { + if (ClassUnloading) { + keep_alive_cld_do(cl); + } else { + cld_do(cl); } } @@ -660,6 +721,16 @@ return array; } +bool ClassLoaderDataGraph::unload_list_contains(const void* x) { + assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint"); + for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) { + if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) { + return true; + } + } + return false; +} + #ifndef PRODUCT bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) { for (ClassLoaderData* data = _head; data != NULL; data = data->next()) { @@ -675,7 +746,7 @@ // Move class loader data from main list to the unloaded list for unloading // and deallocation later. -bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) { +bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) { ClassLoaderData* data = _head; ClassLoaderData* prev = NULL; bool seen_dead_loader = false; @@ -684,16 +755,8 @@ // purging and we don't want to rewalk the previously unloaded class loader data. _saved_unloading = _unloading; - // mark metadata seen on the stack and code cache so we can delete - // unneeded entries. 
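// Aside: the do_unloading() walk in this hunk uses the standard
// unlink-while-walking idiom for a singly linked list, splicing dead nodes
// onto a second list for later purging. A freestanding sketch with a
// stand-in CLD struct:
#include <cstddef>
struct CLD {
  CLD* next;
  bool alive;
};
static void unlink_dead(CLD** head, CLD** dead) {
  CLD* prev = NULL;
  for (CLD* cur = *head; cur != NULL; ) {
    CLD* next = cur->next;
    if (cur->alive) {
      prev = cur;                  // keep: prev trails the cursor
    } else {
      if (prev == NULL) {
        *head = next;              // dead node was the head
      } else {
        prev->next = next;         // bypass the dead node
      }
      cur->next = *dead;           // push onto the unloading list
      *dead = cur;
    }
    cur = next;
  }
}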
- bool has_redefined_a_class = JvmtiExport::has_redefined_a_class(); - MetadataOnStackMark md_on_stack; while (data != NULL) { - if (data->keep_alive() || data->is_alive(is_alive_closure)) { - if (has_redefined_a_class) { - data->classes_do(InstanceKlass::purge_previous_versions); - } - data->free_deallocate_list(); + if (data->is_alive(is_alive_closure)) { prev = data; data = data->next(); continue; @@ -715,6 +778,11 @@ _unloading = dead; } + if (clean_alive) { + // Clean previous versions and the deallocate list. + ClassLoaderDataGraph::clean_metaspaces(); + } + if (seen_dead_loader) { post_class_unload_events(); } @@ -722,6 +790,26 @@ return seen_dead_loader; } +void ClassLoaderDataGraph::clean_metaspaces() { + // mark metadata seen on the stack and code cache so we can delete unneeded entries. + bool has_redefined_a_class = JvmtiExport::has_redefined_a_class(); + MetadataOnStackMark md_on_stack(has_redefined_a_class); + + if (has_redefined_a_class) { + // purge_previous_versions also cleans weak method links. Because + // one method's MDO can reference another method from another + // class loader, we need to first clean weak method links for all + // class loaders here. Below, we can then free redefined methods + // for all class loaders. + for (ClassLoaderData* data = _head; data != NULL; data = data->next()) { + data->classes_do(InstanceKlass::purge_previous_versions); + } + } + + // Need to purge the previous version before deallocating. + free_deallocate_lists(); +} + void ClassLoaderDataGraph::purge() { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); ClassLoaderData* list = _unloading; @@ -749,6 +837,14 @@ #endif } +void ClassLoaderDataGraph::free_deallocate_lists() { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { + // We need to keep this data until InstanceKlass::purge_previous_version has been + // called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces. + cld->free_deallocate_list(); + } +} + // CDS support // Global metaspaces for writing information to the shared archive. When @@ -780,6 +876,60 @@ return _rw_metaspace; } +ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic() + : _next_klass(NULL) { + ClassLoaderData* cld = ClassLoaderDataGraph::_head; + Klass* klass = NULL; + + // Find the first klass in the CLDG. + while (cld != NULL) { + klass = cld->_klasses; + if (klass != NULL) { + _next_klass = klass; + return; + } + cld = cld->next(); + } +} + +Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) { + Klass* next = klass->next_link(); + if (next != NULL) { + return next; + } + + // No more klasses in the current CLD. Time to find a new CLD. + ClassLoaderData* cld = klass->class_loader_data(); + while (next == NULL) { + cld = cld->next(); + if (cld == NULL) { + break; + } + next = cld->_klasses; + } + + return next; +} + +Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() { + Klass* head = (Klass*)_next_klass; + + while (head != NULL) { + Klass* next = next_klass_in_cldg(head); + + Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head); + + if (old_head == head) { + return head; // Won the CAS. + } + + head = old_head; + } + + // Nothing more for the iterator to hand out. 
+ assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head))); + return NULL; +} ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() { _data = ClassLoaderDataGraph::_head; @@ -833,4 +983,4 @@ event.commit(); } -#endif /* INCLUDE_TRACE */ +#endif // INCLUDE_TRACE --- ./hotspot/src/share/vm/classfile/classLoaderData.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/classLoaderData.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -31,9 +31,9 @@ #include "memory/metaspaceCounters.hpp" #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" - +#include "utilities/macros.hpp" #if INCLUDE_TRACE -# include "utilities/ticks.hpp" +#include "utilities/ticks.hpp" #endif // @@ -59,6 +59,7 @@ class ClassLoaderDataGraph : public AllStatic { friend class ClassLoaderData; friend class ClassLoaderDataGraphMetaspaceIterator; + friend class ClassLoaderDataGraphKlassIteratorAtomic; friend class VMStructs; private: // All CLDs (except the null CLD) can be reached by walking _head->_next->... @@ -71,18 +72,26 @@ static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); static void post_class_unload_events(void); + static void clean_metaspaces(); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); static void clear_claimed_marks(); + // oops do static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim); + static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); - static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); + // cld do + static void cld_do(CLDClosure* cl); + static void roots_cld_do(CLDClosure* strong, CLDClosure* weak); + static void keep_alive_cld_do(CLDClosure* cl); + static void always_strong_cld_do(CLDClosure* cl); + // klass do static void classes_do(KlassClosure* klass_closure); static void classes_do(void f(Klass* const)); static void loaded_classes_do(KlassClosure* klass_closure); static void classes_unloading_do(void f(Klass* const)); - static bool do_unloading(BoolObjectClosure* is_alive); + static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive); // CMS support. static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); } @@ -98,10 +107,13 @@ } } + static void free_deallocate_lists(); + static void dump_on(outputStream * const out) PRODUCT_RETURN; static void dump() { dump_on(tty); } static void verify(); + static bool unload_list_contains(const void* x); #ifndef PRODUCT static bool contains_loader_data(ClassLoaderData* loader_data); #endif @@ -134,6 +146,7 @@ }; friend class ClassLoaderDataGraph; + friend class ClassLoaderDataGraphKlassIteratorAtomic; friend class ClassLoaderDataGraphMetaspaceIterator; friend class MetaDataFactory; friend class Method; @@ -149,7 +162,7 @@ // classes in the class loader are allocated. Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup. bool _unloading; // true if this class loader goes away - bool _keep_alive; // if this CLD can be unloaded for anonymous loaders + bool _keep_alive; // if this CLD is kept alive without a keep_alive_object(). bool _is_anonymous; // if this CLD is for an anonymous class volatile int _claimed; // true if claimed, for example during GC traces. // To avoid applying oop closure more than once. 
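// Aside: a standalone sketch of the claim-by-CAS hand-out implemented by
// ClassLoaderDataGraphKlassIteratorAtomic::next_klass() above, using
// std::atomic in place of Atomic::cmpxchg_ptr and a stand-in Node type for
// Klass (next_klass_in_cldg() is simplified to a plain next pointer):
#include <atomic>
struct Node { Node* next; };
class AtomicHandOut {
  std::atomic<Node*> _cursor;          // shared by all worker threads
 public:
  explicit AtomicHandOut(Node* head) : _cursor(head) {}
  Node* claim() {
    Node* head = _cursor.load();
    while (head != nullptr) {
      // On failure another worker advanced the cursor; 'head' is refreshed
      // to the value that worker installed and the claim retries from there.
      if (_cursor.compare_exchange_weak(head, head->next)) {
        return head;                   // this thread won the CAS
      }
    }
    return nullptr;                    // nothing left to hand out
  }
};
// Typical worker loop: for (Node* n = it.claim(); n != nullptr; n = it.claim()) ...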
@@ -195,7 +208,6 @@ void unload(); bool keep_alive() const { return _keep_alive; } - bool is_alive(BoolObjectClosure* is_alive_closure) const; void classes_do(void f(Klass*)); void loaded_classes_do(KlassClosure* klass_closure); void classes_do(void f(InstanceKlass*)); @@ -207,6 +219,9 @@ MetaWord* allocate(size_t size); public: + + bool is_alive(BoolObjectClosure* is_alive_closure) const; + // Accessors Metaspace* metaspace_or_null() const { return _metaspace; } @@ -240,13 +255,16 @@ oop class_loader() const { return _class_loader; } + // The object the GC is using to keep this ClassLoaderData alive. + oop keep_alive_object() const; + // Returns true if this class loader data is for a loader going away. bool is_unloading() const { assert(!(is_the_null_class_loader_data() && _unloading), "The null class loader can never be unloaded"); return _unloading; } - // Anonymous class loader data doesn't have anything to keep them from - // being unloaded during parsing the anonymous class. + + // Used to make sure that this CLD is not unloaded. void set_keep_alive(bool value) { _keep_alive = value; } unsigned int identity_hash() { @@ -287,6 +305,16 @@ void initialize_shared_metaspaces(); }; +// An iterator that distributes Klasses to parallel worker threads. +class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj { + volatile Klass* _next_klass; + public: + ClassLoaderDataGraphKlassIteratorAtomic(); + Klass* next_klass(); + private: + static Klass* next_klass_in_cldg(Klass* klass); +}; + class ClassLoaderDataGraphMetaspaceIterator : public StackObj { ClassLoaderData* _data; public: --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/classfile/classLoaderExt.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP +#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP + +#include "classfile/classLoader.hpp" + +class ClassLoaderExt: public ClassLoader { // AllStatic +public: + + class Context { + const char* _file_name; + public: + Context(const char* class_name, const char* file_name, TRAPS) { + _file_name = file_name; + } + + bool check(ClassFileStream* stream, const int classpath_index) { + return true; + } + + bool should_verify(int classpath_index) { + return false; + } + + instanceKlassHandle record_result(const int classpath_index, + ClassPathEntry* e, instanceKlassHandle result, TRAPS) { + if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) { + if (DumpSharedSpaces) { + result->set_shared_classpath_index(classpath_index); + } + return result; + } else { + return instanceKlassHandle(); // NULL + } + } + }; + + + static void add_class_path_entry(const char* path, bool check_for_duplicates, + ClassPathEntry* new_entry) { + ClassLoader::add_to_list(new_entry); + } + static void append_boot_classpath(ClassPathEntry* new_entry) { + ClassLoader::add_to_list(new_entry); + } + static void setup_search_paths() {} + + static void init_lookup_cache(TRAPS) {} + static void copy_lookup_cache_to_archive(char** top, char* end) {} + static char* restore_lookup_cache_from_archive(char* buffer) {return buffer;} + static inline bool is_lookup_cache_enabled() {return false;} + + static bool known_to_not_exist(JNIEnv *env, jobject loader, const char *classname, TRAPS) {return false;} + static jobjectArray get_lookup_cache_urls(JNIEnv *env, jobject loader, TRAPS) {return NULL;} + static jintArray get_lookup_cache(JNIEnv *env, jobject loader, const char *pkgname, TRAPS) {return NULL;} +}; + +#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP --- ./hotspot/src/share/vm/classfile/dictionary.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/dictionary.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "memory/iterator.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" +#include "runtime/orderAccess.inline.hpp" #include "utilities/hashtable.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -129,15 +130,13 @@ } -bool Dictionary::do_unloading() { +void Dictionary::do_unloading() { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - bool class_was_unloaded = false; - int index = 0; // Defined here for portability! Do not move // Remove unloadable entries and classes from system dictionary // The placeholder array has been handled in always_strong_oops_do. DictionaryEntry* probe = NULL; - for (index = 0; index < table_size(); index++) { + for (int index = 0; index < table_size(); index++) { for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) { probe = *p; Klass* e = probe->klass(); @@ -157,16 +156,8 @@ // Do we need to delete this system dictionary entry? if (loader_data->is_unloading()) { // If the loader is not live this entry should always be - // removed (will never be looked up again). Note that this is - // not the same as unloading the referred class. - if (k_def_class_loader_data == loader_data) { - // This is the defining entry, so the referred class is about - // to be unloaded. - class_was_unloaded = true; - } - // Also remove this system dictionary entry. + // removed (will never be looked up again). purge_entry = true; - } else { // The loader in this entry is alive. 
If the klass is dead, // (determined by checking the defining class loader) @@ -195,9 +186,51 @@ p = probe->next_addr(); } } - return class_was_unloaded; } +void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) { + // Skip the strong roots probe marking if the closures are the same. + if (strong == weak) { + oops_do(strong); + return; + } + + for (int index = 0; index < table_size(); index++) { + for (DictionaryEntry *probe = bucket(index); + probe != NULL; + probe = probe->next()) { + Klass* e = probe->klass(); + ClassLoaderData* loader_data = probe->loader_data(); + if (is_strongly_reachable(loader_data, e)) { + probe->set_strongly_reachable(); + } + } + } + _pd_cache_table->roots_oops_do(strong, weak); +} + +void Dictionary::remove_classes_in_error_state() { + assert(DumpSharedSpaces, "supported only when dumping"); + DictionaryEntry* probe = NULL; + for (int index = 0; index < table_size(); index++) { + for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) { + probe = *p; + InstanceKlass* ik = InstanceKlass::cast(probe->klass()); + if (ik->is_in_error_state()) { // purge this entry + *p = probe->next(); + if (probe == _current_class_entry) { + _current_class_entry = NULL; + } + free_entry(probe); + ResourceMark rm; + tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name()); + continue; + } + + p = probe->next_addr(); + } + } +} void Dictionary::always_strong_oops_do(OopClosure* blk) { // Follow all system classes and temporary placeholders in dictionary; only @@ -489,6 +522,23 @@ } } +void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) { + for (int index = 0; index < table_size(); index++) { + for (ProtectionDomainCacheEntry* probe = bucket(index); + probe != NULL; + probe = probe->next()) { + if (probe->is_strongly_reachable()) { + probe->reset_strongly_reachable(); + probe->oops_do(strong); + } else { + if (weak != NULL) { + probe->oops_do(weak); + } + } + } + } +} + uint ProtectionDomainCacheTable::bucket_size() { return sizeof(ProtectionDomainCacheEntry); } @@ -655,16 +705,17 @@ // ---------------------------------------------------------------------------- -#ifndef PRODUCT -void Dictionary::print() { +void Dictionary::print(bool details) { ResourceMark rm; HandleMark hm; - tty->print_cr("Java system dictionary (table_size=%d, classes=%d)", - table_size(), number_of_entries()); - tty->print_cr("^ indicates that initiating loader is different from " - "defining loader"); + if (details) { + tty->print_cr("Java system dictionary (table_size=%d, classes=%d)", + table_size(), number_of_entries()); + tty->print_cr("^ indicates that initiating loader is different from " + "defining loader"); + } for (int index = 0; index < table_size(); index++) { for (DictionaryEntry* probe = bucket(index); @@ -675,21 +726,28 @@ ClassLoaderData* loader_data = probe->loader_data(); bool is_defining_class = (loader_data == InstanceKlass::cast(e)->class_loader_data()); - tty->print("%s%s", is_defining_class ? " " : "^", + tty->print("%s%s", ((!details) || is_defining_class) ? 
" " : "^", e->external_name()); + if (details) { tty->print(", loader "); - loader_data->print_value(); + if (loader_data != NULL) { + loader_data->print_value(); + } else { + tty->print("NULL"); + } + } tty->cr(); } } - tty->cr(); - _pd_cache_table->print(); + + if (details) { + tty->cr(); + _pd_cache_table->print(); + } tty->cr(); } -#endif - void Dictionary::verify() { guarantee(number_of_entries() >= 0, "Verify of system dictionary failed"); --- ./hotspot/src/share/vm/classfile/dictionary.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/dictionary.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -89,6 +89,7 @@ // GC support void oops_do(OopClosure* f); void always_strong_oops_do(OopClosure* blk); + void roots_oops_do(OopClosure* strong, OopClosure* weak); void always_strong_classes_do(KlassClosure* closure); @@ -99,6 +100,7 @@ void methods_do(void f(Method*)); void unlink(BoolObjectClosure* is_alive); + void remove_classes_in_error_state(); // Classes loaded by the bootstrap loader are always strongly reachable. // If we're not doing class unloading, all classes are strongly reachable. @@ -107,9 +109,8 @@ return (loader_data->is_the_null_class_loader_data() || !ClassUnloading); } - // Unload (that is, break root links to) all unmarked classes and - // loaders. Returns "true" iff something was unloaded. - bool do_unloading(); + // Unload (that is, break root links to) all unmarked classes and loaders. + void do_unloading(); // Protection domains Klass* find(int index, unsigned int hash, Symbol* name, @@ -126,9 +127,7 @@ ProtectionDomainCacheEntry* cache_get(oop protection_domain); -#ifndef PRODUCT - void print(); -#endif + void print(bool details = true); void verify(); }; @@ -218,6 +217,7 @@ // GC support void oops_do(OopClosure* f); void always_strong_oops_do(OopClosure* f); + void roots_oops_do(OopClosure* strong, OopClosure* weak); static uint bucket_size(); --- ./hotspot/src/share/vm/classfile/javaClasses.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/javaClasses.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -41,6 +41,7 @@ #include "oops/method.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayOop.hpp" +#include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" @@ -463,12 +464,11 @@ return true; } -void java_lang_String::print(Handle java_string, outputStream* st) { - oop obj = java_string(); - assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string"); - typeArrayOop value = java_lang_String::value(obj); - int offset = java_lang_String::offset(obj); - int length = java_lang_String::length(obj); +void java_lang_String::print(oop java_string, outputStream* st) { + assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string"); + typeArrayOop value = java_lang_String::value(java_string); + int offset = java_lang_String::offset(java_string); + int length = java_lang_String::length(java_string); int end = MIN2(length, 100); if (value == NULL) { @@ -549,7 +549,7 @@ } } } - create_mirror(k, Handle(NULL), CHECK); + create_mirror(k, Handle(NULL), Handle(NULL), CHECK); } void java_lang_Class::initialize_mirror_fields(KlassHandle k, @@ -569,7 +569,8 @@ InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK); } -void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) { +void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, + Handle 
protection_domain, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); // Use this moment of initialization to cache modifier_flags also, // to support Class.getModifiers(). Instance classes recalculate @@ -624,6 +625,10 @@ } } + // set the classLoader field in the java_lang_Class instance + assert(class_loader() == k->class_loader(), "should be same"); + set_class_loader(mirror(), class_loader()); + // Setup indirection from klass->mirror last // after any exceptions can happen during allocations. if (!k.is_null()) { @@ -685,6 +690,18 @@ } +void java_lang_Class::set_class_loader(oop java_class, oop loader) { + // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet. + if (_class_loader_offset != 0) { + java_class->obj_field_put(_class_loader_offset, loader); + } +} + +oop java_lang_Class::class_loader(oop java_class) { + assert(_class_loader_offset != 0, "must be set"); + return java_class->obj_field(_class_loader_offset); +} + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) @@ -844,6 +861,12 @@ compute_optional_offset(classRedefinedCount_offset, klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature()); + // Needs to be optional because the old build runs Queens during bootstrapping + // and jdk8-9 doesn't have coordinated pushes yet. + compute_optional_offset(_class_loader_offset, + klass_oop, vmSymbols::classLoader_name(), + vmSymbols::classloader_signature()); + CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); } @@ -2753,12 +2776,35 @@ return (Metadata*)mname->address_field(_vmtarget_offset); } +bool java_lang_invoke_MemberName::is_method(oop mname) { + assert(is_instance(mname), "must be MemberName"); + return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0; +} + #if INCLUDE_JVMTI // Can be executed on VM thread only -void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) { - assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type"); +void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method, + Method* new_method, bool* trace_name_printed) { + assert(is_method(mname), "wrong type"); assert(Thread::current()->is_VM_thread(), "not VM thread"); - mname->address_field_put(_vmtarget_offset, (address)ref); + + Method* target = (Method*)mname->address_field(_vmtarget_offset); + if (target == old_method) { + mname->address_field_put(_vmtarget_offset, (address)new_method); + + if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { + if (!(*trace_name_printed)) { + // RC_TRACE_MESG macro has an embedded ResourceMark + RC_TRACE_MESG(("adjust: name=%s", + old_method->method_holder()->external_name())); + *trace_name_printed = true; + } + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE(0x00400000, ("MemberName method update: %s(%s)", + new_method->name()->as_C_string(), + new_method->signature()->as_C_string())); + } + } } #endif // INCLUDE_JVMTI @@ -3083,6 +3129,7 @@ int java_lang_Class::_array_klass_offset; int java_lang_Class::_oop_size_offset; int java_lang_Class::_static_oop_field_count_offset; +int java_lang_Class::_class_loader_offset; int java_lang_Class::_protection_domain_offset; int java_lang_Class::_init_lock_offset; int java_lang_Class::_signers_offset; --- ./hotspot/src/share/vm/classfile/javaClasses.hpp Mon Dec 08 12:28:35 2014 -0800 
+++ ./hotspot/src/share/vm/classfile/javaClasses.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -208,7 +208,7 @@ } // Debugging - static void print(Handle java_string, outputStream* st); + static void print(oop java_string, outputStream* st); friend class JavaClasses; }; @@ -239,19 +239,23 @@ static int _protection_domain_offset; static int _init_lock_offset; static int _signers_offset; + static int _class_loader_offset; static bool offsets_computed; static int classRedefinedCount_offset; + static GrowableArray* _fixup_mirror_list; static void set_init_lock(oop java_class, oop init_lock); static void set_protection_domain(oop java_class, oop protection_domain); + static void set_class_loader(oop java_class, oop class_loader); static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS); public: static void compute_offsets(); // Instance creation - static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS); + static void create_mirror(KlassHandle k, Handle class_loader, + Handle protection_domain, TRAPS); static void fixup_mirror(KlassHandle k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); // Conversion @@ -289,6 +293,8 @@ static objArrayOop signers(oop java_class); static void set_signers(oop java_class, objArrayOop signers); + static oop class_loader(oop java_class); + static int oop_size(oop java_class); static void set_oop_size(oop java_class, int size); static int static_oop_field_count(oop java_class); @@ -1090,7 +1096,8 @@ static Metadata* vmtarget(oop mname); static void set_vmtarget(oop mname, Metadata* target); #if INCLUDE_JVMTI - static void adjust_vmtarget(oop mname, Metadata* target); + static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method, + bool* trace_name_printed); #endif // INCLUDE_JVMTI static intptr_t vmindex(oop mname); @@ -1104,6 +1111,8 @@ return obj != NULL && is_subclass(obj->klass()); } + static bool is_method(oop obj); + // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants): enum { MN_IS_METHOD = 0x00010000, // method (not constructor) --- ./hotspot/src/share/vm/classfile/metadataOnStackMark.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/metadataOnStackMark.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -31,24 +31,25 @@ #include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" #include "services/threadService.hpp" -#include "utilities/growableArray.hpp" +#include "utilities/chunkedList.hpp" +volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL; +volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL; -// Keep track of marked on-stack metadata so it can be cleared. -GrowableArray* _marked_objects = NULL; NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;) // Walk metadata on the stack and mark it so that redefinition doesn't delete // it. Class unloading also walks the previous versions and might try to // delete it, so this class is used by class unloading also. 
-MetadataOnStackMark::MetadataOnStackMark() { +MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) { assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); + assert(_used_buffers == NULL, "sanity check"); NOT_PRODUCT(_is_active = true;) - if (_marked_objects == NULL) { - _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray(1000, true); + + Threads::metadata_do(Metadata::mark_on_stack); + if (visit_code_cache) { + CodeCache::alive_nmethods_do(nmethod::mark_on_stack); } - Threads::metadata_do(Metadata::mark_on_stack); - CodeCache::alive_nmethods_do(nmethod::mark_on_stack); CompileBroker::mark_on_stack(); JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack); ThreadService::metadata_do(Metadata::mark_on_stack); @@ -59,15 +60,93 @@ // Unmark everything that was marked. Can't do the same walk because // redefine classes messes up the code cache so the set of methods // might not be the same. - for (int i = 0; i< _marked_objects->length(); i++) { - _marked_objects->at(i)->set_on_stack(false); + + retire_buffer_for_thread(Thread::current()); + + MetadataOnStackBuffer* buffer = const_cast(_used_buffers); + while (buffer != NULL) { + // Clear on stack state for all metadata. + size_t size = buffer->size(); + for (size_t i = 0; i < size; i++) { + Metadata* md = buffer->at(i); + md->set_on_stack(false); + } + + MetadataOnStackBuffer* next = buffer->next_used(); + + // Move the buffer to the free list. + buffer->clear(); + buffer->set_next_used(NULL); + buffer->set_next_free(const_cast(_free_buffers)); + _free_buffers = buffer; + + // Step to next used buffer. + buffer = next; } - _marked_objects->clear(); // reuse growable array for next time. + + _used_buffers = NULL; + NOT_PRODUCT(_is_active = false;) } +void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) { + if (buffer == NULL) { + return; + } + + MetadataOnStackBuffer* old_head; + + do { + old_head = const_cast(_used_buffers); + buffer->set_next_used(old_head); + } while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head); +} + +void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) { + retire_buffer(thread->metadata_on_stack_buffer()); + thread->set_metadata_on_stack_buffer(NULL); +} + +bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) { + return thread->metadata_on_stack_buffer() != NULL; +} + +MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() { + MetadataOnStackBuffer* allocated; + MetadataOnStackBuffer* new_head; + + do { + allocated = const_cast(_free_buffers); + if (allocated == NULL) { + break; + } + new_head = allocated->next_free(); + } while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated); + + if (allocated == NULL) { + allocated = new MetadataOnStackBuffer(); + } + + assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated))); + + return allocated; +} + // Record which objects are marked so we can unmark the same objects. 
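// Aside: retire_buffer() and allocate_buffer() above form a lock-free LIFO
// free list (a Treiber stack). The same shape in freestanding C++ with
// std::atomic standing in for Atomic::cmpxchg_ptr; note the classic ABA
// caveat for general-purpose use, which the VM constrains by recycling
// buffers only at controlled points:
#include <atomic>
struct Buf { Buf* next; };
static std::atomic<Buf*> g_free_list(nullptr);
static void push_free(Buf* b) {
  Buf* old_head = g_free_list.load();
  do {
    b->next = old_head;    // 'old_head' is refreshed on each failed CAS
  } while (!g_free_list.compare_exchange_weak(old_head, b));
}
static Buf* pop_free() {
  Buf* head = g_free_list.load();
  while (head != nullptr &&
         !g_free_list.compare_exchange_weak(head, head->next)) {
    // failed CAS reloaded 'head'; retry
  }
  return head;             // nullptr: caller allocates a fresh buffer
}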
-void MetadataOnStackMark::record(Metadata* m) { +void MetadataOnStackMark::record(Metadata* m, Thread* thread) { assert(_is_active, "metadata on stack marking is active"); - _marked_objects->push(m); + + MetadataOnStackBuffer* buffer = thread->metadata_on_stack_buffer(); + + if (buffer != NULL && buffer->is_full()) { + retire_buffer(buffer); + buffer = NULL; + } + + if (buffer == NULL) { + buffer = allocate_buffer(); + thread->set_metadata_on_stack_buffer(buffer); + } + + buffer->push(m); } --- ./hotspot/src/share/vm/classfile/metadataOnStackMark.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/metadataOnStackMark.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,9 +26,12 @@ #define SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP #include "memory/allocation.hpp" +#include "utilities/chunkedList.hpp" class Metadata; +typedef ChunkedList MetadataOnStackBuffer; + // Helper class to mark and unmark metadata used on the stack as either handles // or executing methods, so that it can't be deleted during class redefinition // and class unloading. @@ -36,10 +39,20 @@ // metadata during parsing, relocated methods, and methods in backtraces. class MetadataOnStackMark : public StackObj { NOT_PRODUCT(static bool _is_active;) + + static volatile MetadataOnStackBuffer* _used_buffers; + static volatile MetadataOnStackBuffer* _free_buffers; + + static MetadataOnStackBuffer* allocate_buffer(); + static void retire_buffer(MetadataOnStackBuffer* buffer); + public: - MetadataOnStackMark(); - ~MetadataOnStackMark(); - static void record(Metadata* m); + MetadataOnStackMark(bool visit_code_cache); + ~MetadataOnStackMark(); + + static void record(Metadata* m, Thread* thread); + static void retire_buffer_for_thread(Thread* thread); + static bool has_buffer_for_thread(Thread* thread); }; #endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/classfile/sharedClassUtil.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP +#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP + +#include "classfile/sharedPathsMiscInfo.hpp" +#include "memory/filemap.hpp" + +class SharedClassUtil : AllStatic { +public: + + static SharedPathsMiscInfo* allocate_shared_paths_misc_info() { + return new SharedPathsMiscInfo(); + } + + static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) { + return new SharedPathsMiscInfo(buf, size); + } + + static FileMapInfo::FileMapHeader* allocate_file_map_header() { + return new FileMapInfo::FileMapHeader(); + } + + static size_t file_map_header_size() { + return sizeof(FileMapInfo::FileMapHeader); + } + + static size_t shared_class_path_entry_size() { + return sizeof(SharedClassPathEntry); + } + + static void update_shared_classpath(ClassPathEntry *cpe, + SharedClassPathEntry* ent, + time_t timestamp, + long filesize, TRAPS) { + ent->_timestamp = timestamp; + ent->_filesize = filesize; + } + + static void initialize(TRAPS) {} + + inline static bool is_shared_boot_class(Klass* klass) { + return (klass->_shared_class_path_index >= 0); + } +}; + +#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "classfile/classLoader.hpp" +#include "classfile/classLoaderData.inline.hpp" +#include "classfile/sharedPathsMiscInfo.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/metaspaceShared.hpp" +#include "runtime/arguments.hpp" + +void SharedPathsMiscInfo::add_path(const char* path, int type) { + if (TraceClassPaths) { + tty->print("[type=%s] ", type_name(type)); + trace_class_path("[Add misc shared path ", path); + } + write(path, strlen(path) + 1); + write_jint(jint(type)); +} + +void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) { + assert(_allocated, "cannot modify buffer during validation."); + int used = get_used_bytes(); + int target = used + int(needed_bytes); + if (target > _buf_size) { + _buf_size = _buf_size * 2 + (int)needed_bytes; + _buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass); + _cur_ptr = _buf_start + used; + _end_ptr = _buf_start + _buf_size; + } +} + +void SharedPathsMiscInfo::write(const void* ptr, size_t size) { + ensure_size(size); + memcpy(_cur_ptr, ptr, size); + _cur_ptr += size; +} + +bool SharedPathsMiscInfo::read(void* ptr, size_t size) { + if (_cur_ptr + size <= _end_ptr) { + memcpy(ptr, _cur_ptr, size); + _cur_ptr += size; + return true; + } + return false; +} + +bool SharedPathsMiscInfo::fail(const char* msg, const char* name) { + ClassLoader::trace_class_path(msg, name); + MetaspaceShared::set_archive_loading_failed(); + return false; +} + +bool SharedPathsMiscInfo::check() { + // The whole buffer must be 0 terminated so that we can use strlen and strcmp + // without fear. + _end_ptr -= sizeof(jint); + if (_cur_ptr >= _end_ptr) { + return fail("Truncated archive file header"); + } + if (*_end_ptr != 0) { + return fail("Corrupted archive file header"); + } + + while (_cur_ptr < _end_ptr) { + jint type; + const char* path = _cur_ptr; + _cur_ptr += strlen(path) + 1; + if (!read_jint(&type)) { + return fail("Corrupted archive file header"); + } + if (TraceClassPaths) { + tty->print("[type=%s ", type_name(type)); + print_path(tty, type, path); + tty->print_cr("]"); + } + if (!check(type, path)) { + if (!PrintSharedArchiveAndExit) { + return false; + } + } else { + trace_class_path("[ok"); + } + } + + return true; +} + +bool SharedPathsMiscInfo::check(jint type, const char* path) { + switch (type) { + case BOOT: + if (strcmp(path, Arguments::get_sysclasspath()) != 0) { + return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath()); + } + break; + case NON_EXIST: // fall-through + case REQUIRED: + { + struct stat st; + if (os::stat(path, &st) != 0) { + // The file does not actually exist + if (type == REQUIRED) { + // but we require it to exist -> fail + return fail("Required file doesn't exist"); + } + } else { + // The file actually exists + if (type == NON_EXIST) { + // But we want it to not exist -> fail + return fail("File must not exist"); + } + time_t timestamp; + long filesize; + + if (!read_time(&timestamp) || !read_long(&filesize)) { + return fail("Corrupted archive file header"); + } + if (timestamp != st.st_mtime) { + return fail("Timestamp mismatch"); + } + if (filesize != st.st_size) { + return fail("File size mismatch"); + } + } + } + break; + + default: + return fail("Corrupted archive file header"); + } + + return true; +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2014, Oracle and/or its
affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP +#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP + +#include "runtime/os.hpp" + +// During dumping time, when processing class paths, we build up the dump-time +// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry. +// However, we need to store other "misc" information for run-time checking, such as +// +// + The values of Arguments::get_sysclasspath() used during dumping. +// +// + The meta-index file(s) used during dumping (incl. modification time and size) +// +// + The class path elements that were specified during dumping but did not exist -- +// these elements must also be specified at run time, and they also must not +// exist at run time. +// +// These misc items are stored in a linear buffer in SharedPathsMiscInfo. +// The storage format is stream oriented to minimize its size. +// +// When writing the information to the archive file, SharedPathsMiscInfo is stored in +// the archive file header. At run-time, this information is used only during initialization +// (accessed using read() instead of mmap()), and is deallocated afterwards to save space. +// +// The SharedPathsMiscInfo class is used both for creating the information (during +// dump time) and for validating it (at run time). Different constructors are used in the +// two situations. See below. + +class SharedPathsMiscInfo : public CHeapObj<mtClass> { +protected: + char* _buf_start; + char* _cur_ptr; + char* _end_ptr; + int _buf_size; + bool _allocated; // was _buf_start allocated by me?
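// Aside: the byte layout implied by add_path() and check() in the .cpp file
// above: each record is a NUL-terminated path followed by a jint type tag,
// REQUIRED records append time_t and long payloads, and the stream ends with
// a jint 0 (written at dump time) so readers can rely on strlen()/strcmp().
// A freestanding encoder for one REQUIRED record, with plain int/long types
// standing in for jint/time_t:
#include <cstring>
#include <vector>
static void put_bytes(std::vector<char>& buf, const void* p, std::size_t n) {
  const char* c = static_cast<const char*>(p);
  buf.insert(buf.end(), c, c + n);
}
static void put_required(std::vector<char>& buf, const char* path,
                         int type, long mtime, long fsize) {
  put_bytes(buf, path, std::strlen(path) + 1);  // path including its NUL
  put_bytes(buf, &type, sizeof(type));          // type tag (REQUIRED)
  put_bytes(buf, &mtime, sizeof(mtime));        // modification time
  put_bytes(buf, &fsize, sizeof(fsize));        // file size
}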
+ void ensure_size(size_t needed_bytes); + void add_path(const char* path, int type); + + void write(const void* ptr, size_t size); + bool read(void* ptr, size_t size); + + static void trace_class_path(const char* msg, const char* name = NULL) { + ClassLoader::trace_class_path(msg, name); + } +protected: + static bool fail(const char* msg, const char* name = NULL); + virtual bool check(jint type, const char* path); + +public: + enum { + INITIAL_BUF_SIZE = 128 + }; + // This constructor is used when creating the misc information (during dump) + SharedPathsMiscInfo() { + _buf_size = INITIAL_BUF_SIZE; + _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass); + _allocated = true; + } + // This constructor is used when validating the misc info (during run time) + SharedPathsMiscInfo(char *buff, int size) { + _cur_ptr = _buf_start = buff; + _end_ptr = _buf_start + size; + _buf_size = size; + _allocated = false; + } + ~SharedPathsMiscInfo() { + if (_allocated) { + FREE_C_HEAP_ARRAY(char, _buf_start, mtClass); + } + } + int get_used_bytes() { + return _cur_ptr - _buf_start; + } + void* buffer() { + return _buf_start; + } + + // writing -- + + // The path must not exist at run-time + void add_nonexist_path(const char* path) { + add_path(path, NON_EXIST); + } + + // The path must exist and have required size and modification time + void add_required_file(const char* path) { + add_path(path, REQUIRED); + + struct stat st; + if (os::stat(path, &st) != 0) { + assert(0, "sanity"); + ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen + } + write_time(st.st_mtime); + write_long(st.st_size); + } + + // The path must exist, and must contain exactly files/dirs + void add_boot_classpath(const char* path) { + add_path(path, BOOT); + } + int write_jint(jint num) { + write(&num, sizeof(num)); + return 0; + } + void write_time(time_t t) { + write(&t, sizeof(t)); + } + void write_long(long l) { + write(&l, sizeof(l)); + } + + bool dump_to_file(int fd) { + int n = get_used_bytes(); + return (os::write(fd, _buf_start, n) == (size_t)n); + } + + // reading -- + + enum { + BOOT = 1, + NON_EXIST = 2, + REQUIRED = 3 + }; + + virtual const char* type_name(int type) { + switch (type) { + case BOOT: return "BOOT"; + case NON_EXIST: return "NON_EXIST"; + case REQUIRED: return "REQUIRED"; + default: ShouldNotReachHere(); return "?"; + } + } + + virtual void print_path(outputStream* out, int type, const char* path) { + switch (type) { + case BOOT: + out->print("Expecting -Dsun.boot.class.path=%s", path); + break; + case NON_EXIST: + out->print("Expecting that %s does not exist", path); + break; + case REQUIRED: + out->print("Expecting that file %s must exist and is not altered", path); + break; + default: + ShouldNotReachHere(); + } + } + + bool check(); + bool read_jint(jint *ptr) { + return read(ptr, sizeof(jint)); + } + bool read_long(long *ptr) { + return read(ptr, sizeof(long)); + } + bool read_time(time_t *ptr) { + return read(ptr, sizeof(time_t)); + } +}; + +#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP --- ./hotspot/src/share/vm/classfile/symbolTable.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/symbolTable.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -36,6 +36,7 @@ #include "runtime/mutexLocker.hpp" #include "utilities/hashtable.inline.hpp" #if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/g1StringDedup.hpp" #endif @@ -73,9 +74,9 @@ void SymbolTable::initialize_symbols(int 
arena_alloc_size) { // Initialize the arena for global symbols, size passed in depends on CDS. if (arena_alloc_size == 0) { - _arena = new (mtSymbol) Arena(); + _arena = new (mtSymbol) Arena(mtSymbol); } else { - _arena = new (mtSymbol) Arena(arena_alloc_size); + _arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size); } } @@ -204,7 +205,7 @@ } } // If the bucket size is too deep check if this hash code is insufficient. - if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; @@ -655,7 +656,7 @@ } } // If the bucket size is too deep check if this hash code is insufficient. - if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; @@ -704,11 +705,26 @@ return lookup(chars, length); } +// Tell the GC that this string was looked up in the StringTable. +static void ensure_string_alive(oop string) { + // A lookup in the StringTable could return an object that was previously + // considered dead. The SATB part of G1 needs to get notified about this + // potential resurrection, otherwise the marking might not find the object. +#if INCLUDE_ALL_GCS + if (UseG1GC && string != NULL) { + G1SATBCardTableModRefBS::enqueue(string); + } +#endif +} oop StringTable::lookup(jchar* name, int len) { unsigned int hash = hash_string(name, len); int index = the_table()->hash_to_index(hash); - return the_table()->lookup(index, name, len, hash); + oop string = the_table()->lookup(index, name, len, hash); + + ensure_string_alive(string); + + return string; } @@ -719,7 +735,10 @@ oop found_string = the_table()->lookup(index, name, len, hashValue); // Found - if (found_string != NULL) return found_string; + if (found_string != NULL) { + ensure_string_alive(found_string); + return found_string; + } debug_only(StableMemoryChecker smc(name, len * sizeof(name[0]))); assert(!Universe::heap()->is_in_reserved(name), @@ -744,11 +763,17 @@ // Grab the StringTable_lock before getting the_table() because it could // change at safepoint. 
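// Aside: ensure_string_alive() above is a resurrection barrier. During
// concurrent marking a table lookup can return an object the marker has not
// yet visited; SATB collectors such as G1 must be told about that reference
// or the object may be swept while still in use. The generic guard shape,
// with Obj and report_to_satb() as hypothetical stand-ins for oop and
// G1SATBCardTableModRefBS::enqueue():
struct Obj;                          // opaque heap object (stand-in for oop)
void report_to_satb(Obj* obj);       // stand-in for the SATB enqueue hook
template <typename Table, typename Key>
Obj* guarded_lookup(Table& table, const Key& key) {
  Obj* obj = table.raw_lookup(key);  // may observe a not-yet-marked object
  if (obj != nullptr) {
    report_to_satb(obj);             // tell the marker it is reachable
  }
  return obj;
}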
- MutexLocker ml(StringTable_lock, THREAD); + oop added_or_found; + { + MutexLocker ml(StringTable_lock, THREAD); + // Otherwise, add to symbol to table + added_or_found = the_table()->basic_add(index, string, name, len, + hashValue, CHECK_NULL); + } - // Otherwise, add to symbol to table - return the_table()->basic_add(index, string, name, len, - hashValue, CHECK_NULL); + ensure_string_alive(added_or_found); + + return added_or_found; } oop StringTable::intern(Symbol* symbol, TRAPS) { --- ./hotspot/src/share/vm/classfile/symbolTable.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/symbolTable.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -74,7 +74,7 @@ operator Symbol*() { return _temp; } }; -class SymbolTable : public Hashtable { +class SymbolTable : public RehashableHashtable { friend class VMStructs; friend class ClassFileParser; @@ -110,10 +110,10 @@ Symbol* lookup(int index, const char* name, int len, unsigned int hash); SymbolTable() - : Hashtable(SymbolTableSize, sizeof (HashtableEntry)) {} + : RehashableHashtable(SymbolTableSize, sizeof (HashtableEntry)) {} SymbolTable(HashtableBucket* t, int number_of_entries) - : Hashtable(SymbolTableSize, sizeof (HashtableEntry), t, + : RehashableHashtable(SymbolTableSize, sizeof (HashtableEntry), t, number_of_entries) {} // Arena for permanent symbols (null class loader) that are never unloaded @@ -252,7 +252,7 @@ static int parallel_claimed_index() { return _parallel_claimed_idx; } }; -class StringTable : public Hashtable { +class StringTable : public RehashableHashtable { friend class VMStructs; private: @@ -278,11 +278,11 @@ // in the range [start_idx, end_idx). static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed); - StringTable() : Hashtable((int)StringTableSize, + StringTable() : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry)) {} StringTable(HashtableBucket* t, int number_of_entries) - : Hashtable((int)StringTableSize, sizeof (HashtableEntry), t, + : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry), t, number_of_entries) {} public: // The string table --- ./hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/systemDictionary.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,10 +30,15 @@ #include "classfile/placeholders.hpp" #include "classfile/resolutionErrors.hpp" #include "classfile/systemDictionary.hpp" +#if INCLUDE_CDS +#include "classfile/sharedClassUtil.hpp" +#include "classfile/systemDictionaryShared.hpp" +#endif #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/interpreter.hpp" +#include "memory/filemap.hpp" #include "memory/gcLocker.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" @@ -46,20 +51,21 @@ #include "oops/typeArrayKlass.hpp" #include "prims/jvmtiEnvBase.hpp" #include "prims/methodHandles.hpp" +#include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fieldType.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/signature.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" #include "utilities/macros.hpp" #include "utilities/ticks.hpp" - #if INCLUDE_TRACE - #include "trace/tracing.hpp" +#include 
"trace/tracing.hpp" #endif Dictionary* SystemDictionary::_dictionary = NULL; @@ -108,6 +114,8 @@ CHECK); _java_system_loader = (oop)result.get_jobject(); + + CDS_ONLY(SystemDictionaryShared::initialize(CHECK);) } @@ -973,6 +981,7 @@ // as the host_klass assert(EnableInvokeDynamic, ""); guarantee(host_klass->class_loader() == class_loader(), "should be the same"); + guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping"); loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL); loader_data->record_dependency(host_klass(), CHECK_NULL); } else { @@ -1134,7 +1143,7 @@ return k(); } - +#if INCLUDE_CDS void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length, int number_of_entries) { assert(length == _nof_buckets * sizeof(HashtableBucket), @@ -1167,15 +1176,21 @@ instanceKlassHandle SystemDictionary::load_shared_class( Symbol* class_name, Handle class_loader, TRAPS) { instanceKlassHandle ik (THREAD, find_shared_class(class_name)); - return load_shared_class(ik, class_loader, THREAD); + // Make sure we only return the boot class for the NULL classloader. + if (ik.not_null() && + SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) { + Handle protection_domain; + return load_shared_class(ik, class_loader, protection_domain, THREAD); + } + return instanceKlassHandle(); } -instanceKlassHandle SystemDictionary::load_shared_class( - instanceKlassHandle ik, Handle class_loader, TRAPS) { - assert(class_loader.is_null(), "non-null classloader for shared class?"); +instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik, + Handle class_loader, + Handle protection_domain, TRAPS) { if (ik.not_null()) { instanceKlassHandle nh = instanceKlassHandle(); // null Handle - Symbol* class_name = ik->name(); + Symbol* class_name = ik->name(); // Found the class, now load the superclass and interfaces. If they // are shared, add them to the main system dictionary and reset @@ -1184,7 +1199,7 @@ if (ik->super() != NULL) { Symbol* cn = ik->super()->name(); resolve_super_or_fail(class_name, cn, - class_loader, Handle(), true, CHECK_(nh)); + class_loader, protection_domain, true, CHECK_(nh)); } Array* interfaces = ik->local_interfaces(); @@ -1197,7 +1212,7 @@ // reinitialized yet (they will be once the interface classes // are loaded) Symbol* name = k->name(); - resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh)); + resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh)); } // Adjust methods to recover missing data. They need addresses for @@ -1206,30 +1221,45 @@ // Updating methods must be done under a lock so multiple // threads don't update these in parallel - // Shared classes are all currently loaded by the bootstrap - // classloader, so this will never cause a deadlock on - // a custom class loader lock. + // + // Shared classes are all currently loaded by either the bootstrap or + // internal parallel class loaders, so this will never cause a deadlock + // on a custom class loader lock. 
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader()); { Handle lockObject = compute_loader_lock_object(class_loader, THREAD); check_loader_lock_contention(lockObject, THREAD); ObjectLocker ol(lockObject, THREAD, true); - ik->restore_unshareable_info(CHECK_(nh)); + ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh)); } if (TraceClassLoading) { ResourceMark rm; tty->print("[Loaded %s", ik->external_name()); tty->print(" from shared objects file"); + if (class_loader.not_null()) { + tty->print(" by %s", loader_data->loader_name()); + } tty->print_cr("]"); } + + if (DumpLoadedClassList != NULL && classlist_file->is_open()) { + // Only dump the classes that can be stored into CDS archive + if (SystemDictionaryShared::is_sharing_possible(loader_data)) { + ResourceMark rm(THREAD); + classlist_file->print_cr("%s", ik->name()->as_C_string()); + classlist_file->flush(); + } + } + // notify a class loaded from shared object ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()), true /* shared class */); } return ik; } - +#endif // INCLUDE_CDS instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) { instanceKlassHandle nh = instanceKlassHandle(); // null Handle @@ -1239,8 +1269,10 @@ // shared spaces. instanceKlassHandle k; { +#if INCLUDE_CDS PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time()); k = load_shared_class(class_name, class_loader, THREAD); +#endif } if (k.is_null()) { @@ -1599,7 +1631,6 @@ Universe::flush_dependents_on(k); } - // ---------------------------------------------------------------------------- // GC support @@ -1612,13 +1643,7 @@ // system dictionary and follows the remaining classes' contents. void SystemDictionary::always_strong_oops_do(OopClosure* blk) { - blk->do_oop(&_java_system_loader); - blk->do_oop(&_system_loader_lock_obj); - - dictionary()->always_strong_oops_do(blk); - - // Visit extra methods - invoke_method_table()->oops_do(blk); + roots_oops_do(blk, NULL); } void SystemDictionary::always_strong_classes_do(KlassClosure* closure) { @@ -1665,12 +1690,11 @@ // Assumes classes in the SystemDictionary are only unloaded at a safepoint // Note: anonymous classes are not in the SD. -bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { +bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) { // First, mark for unload all ClassLoaderData referencing a dead class loader. 
- bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive); - bool unloading_occurred = false; - if (has_dead_loaders) { - unloading_occurred = dictionary()->do_unloading(); + bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive); + if (unloading_occurred) { + dictionary()->do_unloading(); constraints()->purge_loader_constraints(); resolution_errors()->purge_resolution_errors(); } @@ -1685,9 +1709,22 @@ return unloading_occurred; } +void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) { + strong->do_oop(&_java_system_loader); + strong->do_oop(&_system_loader_lock_obj); + CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);) + + // Adjust dictionary + dictionary()->roots_oops_do(strong, weak); + + // Visit extra methods + invoke_method_table()->oops_do(strong); +} + void SystemDictionary::oops_do(OopClosure* f) { f->do_oop(&_java_system_loader); f->do_oop(&_system_loader_lock_obj); + CDS_ONLY(SystemDictionaryShared::oops_do(f);) // Adjust dictionary dictionary()->oops_do(f); @@ -1749,6 +1786,10 @@ invoke_method_table()->methods_do(f); } +void SystemDictionary::remove_classes_in_error_state() { + dictionary()->remove_classes_in_error_state(); +} + // ---------------------------------------------------------------------------- // Lazily load klasses @@ -2235,9 +2276,15 @@ spe = NULL; // Must create lots of stuff here, but outside of the SystemDictionary lock. m = Method::make_method_handle_intrinsic(iid, signature, CHECK_(empty)); - CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier, - methodHandle(), CompileThreshold, "MH", CHECK_(empty)); - + if (!Arguments::is_interpreter_only()) { + // Generate a compiled form of the MH intrinsic. + AdapterHandlerLibrary::create_native_wrapper(m); + // Check if we have the compiled code. + if (!m->has_compiled_code()) { + THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(), + "out of space in CodeCache for method handle intrinsic", empty); + } + } // Now grab the lock. We might have to throw away the new method, // if a racing thread has managed to install one at the same time. 
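The method-handle hunk just above replaces a tiered JIT compile with an immediate native-wrapper request, and demands compiled code unless the VM runs interpreter-only. A condensed model of that decision follows; create_wrapper is an illustrative stand-in for AdapterHandlerLibrary::create_native_wrapper, which in the real VM can fail when the CodeCache is full.

    #include <stdexcept>

    struct Method { bool has_code; };  // stand-in for a methodHandle

    // Stand-in for AdapterHandlerLibrary::create_native_wrapper; the real
    // call can fail to produce code when the CodeCache is exhausted.
    static void create_wrapper(Method& m) { m.has_code = true; }

    // interpreter_only mirrors Arguments::is_interpreter_only() (-Xint).
    void ensure_intrinsic_code(Method& m, bool interpreter_only) {
      if (interpreter_only) {
        return;  // the interpreter entry suffices; no wrapper is needed
      }
      create_wrapper(m);
      if (!m.has_code) {
        // Surfaces as java.lang.VirtualMachineError in the patch.
        throw std::runtime_error("out of space in CodeCache for MH intrinsic");
      }
    }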
{ @@ -2251,6 +2298,9 @@ } assert(spe != NULL && spe->method() != NULL, ""); + assert(Arguments::is_interpreter_only() || (spe->method()->has_compiled_code() && + spe->method()->code()->entry_point() == spe->method()->from_compiled_entry()), + "MH intrinsic invariant"); return spe->method(); } @@ -2562,10 +2612,12 @@ // ---------------------------------------------------------------------------- -#ifndef PRODUCT +void SystemDictionary::print_shared(bool details) { + shared_dictionary()->print(details); +} -void SystemDictionary::print() { - dictionary()->print(); +void SystemDictionary::print(bool details) { + dictionary()->print(details); // Placeholders GCMutexLocker mu(SystemDictionary_lock); @@ -2575,7 +2627,6 @@ constraints()->print(); } -#endif void SystemDictionary::verify() { guarantee(dictionary() != NULL, "Verify of system dictionary failed"); @@ -2613,7 +2664,7 @@ class_loader->klass() : (Klass*)NULL); event.commit(); } -#endif /* INCLUDE_TRACE */ +#endif // INCLUDE_TRACE } #ifndef PRODUCT --- ./hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/systemDictionary.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -111,6 +111,7 @@ do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \ do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \ do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \ + do_klass(SecureClassLoader_klass, java_security_SecureClassLoader, Pre ) \ do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \ do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \ do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \ @@ -168,6 +169,17 @@ do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \ do_klass(misc_Unsafe_klass, sun_misc_Unsafe, Pre ) \ \ + /* support for CDS */ \ + do_klass(ByteArrayInputStream_klass, java_io_ByteArrayInputStream, Pre ) \ + do_klass(File_klass, java_io_File, Pre ) \ + do_klass(URLClassLoader_klass, java_net_URLClassLoader, Pre ) \ + do_klass(URL_klass, java_net_URL, Pre ) \ + do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \ + do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \ + do_klass(sun_misc_Launcher_AppClassLoader_klass, sun_misc_Launcher_AppClassLoader, Pre ) \ + do_klass(sun_misc_Launcher_ExtClassLoader_klass, sun_misc_Launcher_ExtClassLoader, Pre ) \ + do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \ + \ /* It's NULL in non-1.4 JDKs. */ \ do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \ /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \ @@ -227,7 +239,7 @@ static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS); // Convenient call for null loader and protection domain. static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS); -private: +protected: // handle error translation for resolve_or_null results static Klass* handle_resolution_exception(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS); @@ -330,17 +342,21 @@ // Unload (that is, break root links to) all unmarked classes and // loaders. Returns "true" iff something was unloaded. 
- static bool do_unloading(BoolObjectClosure* is_alive); + static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true); + + // Used by DumpSharedSpaces only to remove classes that failed verification + static void remove_classes_in_error_state(); static int calculate_systemdictionary_size(int loadedclasses); // Applies "f->do_oop" to all root oops in the system dictionary. static void oops_do(OopClosure* f); + static void roots_oops_do(OopClosure* strong, OopClosure* weak); // System loader lock static oop system_loader_lock() { return _system_loader_lock_obj; } -private: +protected: // Extended Redefine classes support (tbi) static void preloaded_classes_do(KlassClosure* f); static void lazily_loaded_classes_do(KlassClosure* f); @@ -353,7 +369,8 @@ static void set_shared_dictionary(HashtableBucket* t, int length, int number_of_entries); // Printing - static void print() PRODUCT_RETURN; + static void print(bool details = true); + static void print_shared(bool details = true); static void print_class_statistics() PRODUCT_RETURN; static void print_method_statistics() PRODUCT_RETURN; @@ -439,7 +456,7 @@ static void load_abstract_ownable_synchronizer_klass(TRAPS); -private: +protected: // Tells whether ClassLoader.loadClassInternal is present static bool has_loadClassInternal() { return _has_loadClassInternal; } @@ -467,7 +484,7 @@ // Register a new class loader static ClassLoaderData* register_loader(Handle class_loader, TRAPS); -private: +protected: // Mirrors for primitive classes (created eagerly) static oop check_mirror(oop m) { assert(m != NULL, "mirror not initialized"); @@ -536,7 +553,7 @@ static void delete_resolution_error(ConstantPool* pool); static Symbol* find_resolution_error(constantPoolHandle pool, int which); - private: + protected: enum Constants { _loader_constraint_size = 107, // number of entries in constraint table @@ -587,7 +604,7 @@ friend class CounterDecay; static Klass* try_get_next_class(); -private: +protected: static void validate_protection_domain(instanceKlassHandle klass, Handle class_loader, Handle protection_domain, TRAPS); @@ -614,10 +631,10 @@ static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, Handle class_loader, instanceKlassHandle k, TRAPS); - static instanceKlassHandle load_shared_class(Symbol* class_name, - Handle class_loader, TRAPS); static instanceKlassHandle load_shared_class(instanceKlassHandle ik, - Handle class_loader, TRAPS); + Handle class_loader, + Handle protection_domain, + TRAPS); static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS); static Handle compute_loader_lock_object(Handle class_loader, TRAPS); static void check_loader_lock_contention(Handle loader_lock, TRAPS); @@ -625,9 +642,12 @@ static bool is_parallelDefine(Handle class_loader); public: + static instanceKlassHandle load_shared_class(Symbol* class_name, + Handle class_loader, + TRAPS); static bool is_ext_class_loader(Handle class_loader); -private: +protected: static Klass* find_shared_class(Symbol* class_name); // Setup link to hierarchy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/classfile/systemDictionaryShared.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + + +#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP +#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP + +#include "classfile/systemDictionary.hpp" + +class SystemDictionaryShared: public SystemDictionary { +public: + static void initialize(TRAPS) {} + static instanceKlassHandle find_or_load_shared_class(Symbol* class_name, + Handle class_loader, + TRAPS) { + return instanceKlassHandle(); + } + static void roots_oops_do(OopClosure* blk) {} + static void oops_do(OopClosure* f) {} + static bool is_sharing_possible(ClassLoaderData* loader_data) { + oop class_loader = loader_data->class_loader(); + return (class_loader == NULL); + } +}; + +#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP --- ./hotspot/src/share/vm/classfile/verifier.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/verifier.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -43,7 +43,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #ifdef TARGET_ARCH_x86 # include "bytes_x86.hpp" --- ./hotspot/src/share/vm/classfile/vmSymbols.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -92,11 +92,17 @@ template(java_lang_CharSequence, "java/lang/CharSequence") \ template(java_lang_SecurityManager, "java/lang/SecurityManager") \ template(java_security_AccessControlContext, "java/security/AccessControlContext") \ + template(java_security_CodeSource, "java/security/CodeSource") \ template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \ + template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \ + template(java_net_URLClassLoader, "java/net/URLClassLoader") \ + template(java_net_URL, "java/net/URL") \ + template(java_util_jar_Manifest, "java/util/jar/Manifest") \ template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \ template(java_io_OutputStream, "java/io/OutputStream") \ template(java_io_Reader, "java/io/Reader") \ template(java_io_BufferedReader, "java/io/BufferedReader") \ + template(java_io_File, "java/io/File") \ template(java_io_FileInputStream, "java/io/FileInputStream") \ template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \ template(java_io_Serializable, "java/io/Serializable") \ @@ -107,9 +113,11 @@ template(java_util_Hashtable, "java/util/Hashtable") \ template(java_lang_Compiler, "java/lang/Compiler") \ template(sun_misc_Signal, "sun/misc/Signal") \ + template(sun_misc_Launcher, "sun/misc/Launcher") \ template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \ template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \ template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \ + template(sun_misc_Launcher_AppClassLoader, "sun/misc/Launcher$AppClassLoader") \ template(sun_misc_Launcher_ExtClassLoader, "sun/misc/Launcher$ExtClassLoader") \ \ /* Java runtime version access */ \ @@ -398,6 +406,14 @@ template(signers_name, "signers_name") \ template(loader_data_name, "loader_data") \ template(dependencies_name, "dependencies") \ + template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \ + template(getFileURL_name, "getFileURL") \ + template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \ + template(definePackageInternal_name, "definePackageInternal") \ + template(definePackageInternal_signature, "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \ + template(getProtectionDomain_name, "getProtectionDomain") \ + template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \ + template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \ \ /* non-intrinsic name/signature pairs: */ \ template(register_method_name, "register") \ @@ -573,6 +589,7 @@ template(serializePropertiesToByteArray_signature, "()[B") \ template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \ template(classRedefinedCount_name, "classRedefinedCount") \ + template(classLoader_name, "classLoader") \ \ /* trace signatures */ \ TRACE_TEMPLATES(template) \ @@ -772,6 +789,11 @@ do_name( encodeISOArray_name, "encodeISOArray") \ do_signature(encodeISOArray_signature, "([CI[BII)I") \ \ + do_class(java_math_BigInteger, "java/math/BigInteger") \ + do_intrinsic(_multiplyToLen, java_math_BigInteger, multiplyToLen_name, multiplyToLen_signature, F_R) \ + do_name( multiplyToLen_name, "multiplyToLen") \ + do_signature(multiplyToLen_signature, "([II[II[I)[I") \ + \ /* java/lang/ref/Reference */ \ do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, 
void_object_signature, F_R) \ \ @@ -790,6 +812,26 @@ do_name( decrypt_name, "decrypt") \ do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \ \ + /* support for sun.security.provider.SHA */ \ + do_class(sun_security_provider_sha, "sun/security/provider/SHA") \ + do_intrinsic(_sha_implCompress, sun_security_provider_sha, implCompress_name, implCompress_signature, F_R) \ + do_name( implCompress_name, "implCompress") \ + do_signature(implCompress_signature, "([BI)V") \ + \ + /* support for sun.security.provider.SHA2 */ \ + do_class(sun_security_provider_sha2, "sun/security/provider/SHA2") \ + do_intrinsic(_sha2_implCompress, sun_security_provider_sha2, implCompress_name, implCompress_signature, F_R) \ + \ + /* support for sun.security.provider.SHA5 */ \ + do_class(sun_security_provider_sha5, "sun/security/provider/SHA5") \ + do_intrinsic(_sha5_implCompress, sun_security_provider_sha5, implCompress_name, implCompress_signature, F_R) \ + \ + /* support for sun.security.provider.DigestBase */ \ + do_class(sun_security_provider_digestbase, "sun/security/provider/DigestBase") \ + do_intrinsic(_digestBase_implCompressMB, sun_security_provider_digestbase, implCompressMB_name, implCompressMB_signature, F_R) \ + do_name( implCompressMB_name, "implCompressMultiBlock") \ + do_signature(implCompressMB_signature, "([BII)I") \ + \ /* support for java.util.zip */ \ do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \ do_intrinsic(_updateCRC32, java_util_zip_CRC32, update_name, int2_int_signature, F_SN) \ --- ./hotspot/src/share/vm/code/codeBlob.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/codeBlob.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -253,6 +253,7 @@ void BufferBlob::free( BufferBlob *blob ) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + blob->flush(); { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CodeCache::free((CodeBlob*)blob); --- ./hotspot/src/share/vm/code/codeCache.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/codeCache.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -337,6 +337,11 @@ // Walk the list of methods which might contain non-perm oops. void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) { assert_locked_or_safepoint(CodeCache_lock); + + if (UseG1GC) { + return; + } + debug_only(mark_scavenge_root_nmethods()); for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) { @@ -362,6 +367,11 @@ void CodeCache::add_scavenge_root_nmethod(nmethod* nm) { assert_locked_or_safepoint(CodeCache_lock); + + if (UseG1GC) { + return; + } + nm->set_on_scavenge_root_list(); nm->set_scavenge_root_link(_scavenge_root_nmethods); set_scavenge_root_nmethods(nm); @@ -370,6 +380,11 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) { assert_locked_or_safepoint(CodeCache_lock); + + if (UseG1GC) { + return; + } + print_trace("drop_scavenge_root", nm); nmethod* last = NULL; nmethod* cur = scavenge_root_nmethods(); @@ -391,6 +406,11 @@ void CodeCache::prune_scavenge_root_nmethods() { assert_locked_or_safepoint(CodeCache_lock); + + if (UseG1GC) { + return; + } + debug_only(mark_scavenge_root_nmethods()); nmethod* last = NULL; @@ -423,6 +443,10 @@ #ifndef PRODUCT void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) { + if (UseG1GC) { + return; + } + // While we are here, verify the integrity of the list. 
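Each scavenge-root list operation in the codeCache.cpp hunks above now begins with the same early out. A sketch of the guard's shape (g_use_g1 is an illustrative stand-in for the UseG1GC flag):

    static bool g_use_g1 = true;  // illustrative stand-in for UseG1GC

    // Shape of every guarded list operation after this patch: under G1 the
    // shared scavenge-root list is never maintained, because nmethods are
    // registered with the heap (register_nmethod) and tracked per region.
    void scavenge_root_list_op() {
      if (g_use_g1) {
        return;  // early out added at the top of each list operation
      }
      // ... legacy list manipulation used by the non-G1 collectors ...
    }

Centralizing the roots in the heap is what lets G1 later unregister an nmethod before marking it unloaded, as the nmethod.cpp hunks below do.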
mark_scavenge_root_nmethods(); for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) { @@ -463,9 +487,36 @@ } #endif //PRODUCT +void CodeCache::verify_clean_inline_caches() { +#ifdef ASSERT + FOR_ALL_ALIVE_BLOBS(cb) { + if (cb->is_nmethod()) { + nmethod* nm = (nmethod*)cb; + assert(!nm->is_unloaded(), "Tautology"); + nm->verify_clean_inline_caches(); + nm->verify(); + } + } +#endif +} + +void CodeCache::verify_icholder_relocations() { +#ifdef ASSERT + // make sure that we aren't leaking icholders + int count = 0; + FOR_ALL_BLOBS(cb) { + if (cb->is_nmethod()) { + nmethod* nm = (nmethod*)cb; + count += nm->verify_icholder_relocations(); + } + } + + assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() == + CompiledICHolder::live_count(), "must agree"); +#endif +} void CodeCache::gc_prologue() { - assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called"); } void CodeCache::gc_epilogue() { @@ -478,41 +529,15 @@ nm->cleanup_inline_caches(); } DEBUG_ONLY(nm->verify()); - nm->fix_oop_relocations(); + DEBUG_ONLY(nm->verify_oop_relocations()); } } set_needs_cache_clean(false); prune_scavenge_root_nmethods(); - assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called"); -#ifdef ASSERT - // make sure that we aren't leaking icholders - int count = 0; - FOR_ALL_BLOBS(cb) { - if (cb->is_nmethod()) { - RelocIterator iter((nmethod*)cb); - while(iter.next()) { - if (iter.type() == relocInfo::virtual_call_type) { - if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) { - CompiledIC *ic = CompiledIC_at(iter.reloc()); - if (TraceCompiledIC) { - tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder())); - ic->print(); - } - assert(ic->cached_icholder() != NULL, "must be non-NULL"); - count++; - } - } - } - } - } - - assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() == - CompiledICHolder::live_count(), "must agree"); -#endif + verify_icholder_relocations(); } - void CodeCache::verify_oops() { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); VerifyOopClosure voc; @@ -687,7 +712,9 @@ void CodeCache::mark_all_nmethods_for_deoptimization() { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); FOR_ALL_ALIVE_NMETHODS(nm) { - nm->mark_for_deoptimization(); + if (!nm->method()->is_method_handle_intrinsic()) { + nm->mark_for_deoptimization(); + } } } --- ./hotspot/src/share/vm/code/codeCache.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/codeCache.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -134,10 +134,6 @@ // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" // to "true" iff some code got unloaded. 
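verify_icholder_relocations(), shown above, enforces a conservation law over CompiledICHolders: holders referenced from code, plus holders pending in the InlineCacheBuffer, plus live-but-unclaimed holders must equal the live count. As a freestanding check (parameter names are illustrative):

    #include <cassert>

    // Freestanding version of the invariant asserted by
    // CodeCache::verify_icholder_relocations():
    //   holders referenced from nmethods + pending in InlineCacheBuffer
    //     + live but not yet claimed == all live CompiledICHolders
    void check_icholder_accounting(int in_code, int pending, int not_claimed,
                                   int live) {
      assert(in_code + pending + not_claimed == live && "icholder leak");
    }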
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); - static void oops_do(OopClosure* f) { - CodeBlobToOopClosure oopc(f, /*do_marking=*/ false); - blobs_do(&oopc); - } static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN; static void scavenge_root_nmethods_do(CodeBlobClosure* f); @@ -172,6 +168,9 @@ static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches + static void verify_clean_inline_caches(); + static void verify_icholder_relocations(); + // Deoptimization static int mark_for_deoptimization(DepChange& changes); #ifdef HOTSWAP --- ./hotspot/src/share/vm/code/compiledIC.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/compiledIC.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -99,13 +99,13 @@ } { - MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); + MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag); #ifdef ASSERT - CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call); - assert(cb != NULL && cb->is_nmethod(), "must be nmethod"); + CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call); + assert(cb != NULL && cb->is_nmethod(), "must be nmethod"); #endif - _ic_call->set_destination_mt_safe(entry_point); -} + _ic_call->set_destination_mt_safe(entry_point); + } if (is_optimized() || is_icstub) { // Optimized call sites don't have a cache value and ICStub call @@ -159,6 +159,50 @@ //----------------------------------------------------------------------------- // High-level access to an inline cache. Guaranteed to be MT-safe. +void CompiledIC::initialize_from_iter(RelocIterator* iter) { + assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call"); + + if (iter->type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter->virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = _ic_call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. 
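The point of the new RelocIterator-based CompiledIC constructor in the compiledIC.cpp hunk below is to avoid a second scan: the address-based constructor must search the relocation records for the call site, while a caller that is already iterating can hand over its current record. A toy comparison (Reloc is a stand-in record type, not HotSpot's relocInfo):

    #include <cstddef>
    #include <vector>

    struct Reloc { std::size_t addr; int type; };  // stand-in record

    // Address-based construction: a fresh O(n) search over the relocation
    // info, which is what CompiledIC(nmethod*, NativeCall*) has to do.
    const Reloc* find_reloc(const std::vector<Reloc>& relocs, std::size_t addr) {
      for (std::size_t i = 0; i < relocs.size(); i++) {
        if (relocs[i].addr == addr) return &relocs[i];
      }
      return nullptr;
    }

    // Iterator-based construction: the caller already stands on the record,
    // so CompiledIC(RelocIterator*) can wrap it without re-scanning.
    const Reloc* from_iterator(const Reloc& current) { return &current; }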
+ RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + + initialize_from_iter(&iter); +} + +CompiledIC::CompiledIC(RelocIterator* iter) + : _ic_call(nativeCall_at(iter->addr())) +{ + address ic_call = _ic_call->instruction_address(); + + nmethod* nm = iter->code(); + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + initialize_from_iter(iter); +} bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) { assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); @@ -485,7 +529,7 @@ void CompiledStaticCall::set_to_clean() { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); // Reset call site - MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); + MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag); #ifdef ASSERT CodeBlob* cb = CodeCache::find_blob_unsafe(this); assert(cb != NULL && cb->is_nmethod(), "must be nmethod"); @@ -551,6 +595,7 @@ } else { // Callee is interpreted code. In any case entering the interpreter // puts a converter-frame on the stack to save arguments. + assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics"); info._to_interpreter = true; info._entry = m()->get_c2i_entry(); } --- ./hotspot/src/share/vm/code/compiledIC.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/compiledIC.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -150,6 +150,9 @@ bool _is_optimized; // an optimized virtual call (i.e., no compiled IC) CompiledIC(nmethod* nm, NativeCall* ic_call); + CompiledIC(RelocIterator* iter); + + void initialize_from_iter(RelocIterator* iter); static bool is_icholder_entry(address entry); @@ -183,6 +186,7 @@ friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr); friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site); friend CompiledIC* CompiledIC_at(Relocation* call_site); + friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter); // This is used to release CompiledICHolder*s from nmethods that // are about to be freed. The callsite might contain other stale @@ -263,6 +267,13 @@ return c_ic; } +inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) { + assert(reloc_iter->type() == relocInfo::virtual_call_type || + reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. 
info"); + CompiledIC* c_ic = new CompiledIC(reloc_iter); + c_ic->verify(); + return c_ic; +} //----------------------------------------------------------------------------- // The CompiledStaticCall represents a call to a static method in the compiled --- ./hotspot/src/share/vm/code/dependencies.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/dependencies.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" @@ -406,56 +407,66 @@ // for the sake of the compiler log, print out current dependencies: void Dependencies::log_all_dependencies() { if (log() == NULL) return; - ciBaseObject* args[max_arg_count]; + ResourceMark rm; for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) { DepType dept = (DepType)deptv; GrowableArray* deps = _deps[dept]; - if (deps->length() == 0) continue; + int deplen = deps->length(); + if (deplen == 0) { + continue; + } int stride = dep_args(dept); + GrowableArray* ciargs = new GrowableArray(stride); for (int i = 0; i < deps->length(); i += stride) { for (int j = 0; j < stride; j++) { // flush out the identities before printing - args[j] = deps->at(i+j); + ciargs->push(deps->at(i+j)); } - write_dependency_to(log(), dept, stride, args); + write_dependency_to(log(), dept, ciargs); + ciargs->clear(); } + guarantee(deplen == deps->length(), "deps array cannot grow inside nested ResoureMark scope"); } } void Dependencies::write_dependency_to(CompileLog* log, DepType dept, - int nargs, DepArgument args[], + GrowableArray* args, Klass* witness) { if (log == NULL) { return; } + ResourceMark rm; ciEnv* env = ciEnv::current(); - ciBaseObject* ciargs[max_arg_count]; - assert(nargs <= max_arg_count, "oob"); - for (int j = 0; j < nargs; j++) { - if (args[j].is_oop()) { - ciargs[j] = env->get_object(args[j].oop_value()); + GrowableArray* ciargs = new GrowableArray(args->length()); + for (GrowableArrayIterator it = args->begin(); it != args->end(); ++it) { + DepArgument arg = *it; + if (arg.is_oop()) { + ciargs->push(env->get_object(arg.oop_value())); } else { - ciargs[j] = env->get_metadata(args[j].metadata_value()); + ciargs->push(env->get_metadata(arg.metadata_value())); } } - Dependencies::write_dependency_to(log, dept, nargs, ciargs, witness); + int argslen = ciargs->length(); + Dependencies::write_dependency_to(log, dept, ciargs, witness); + guarantee(argslen == ciargs->length(), "ciargs array cannot grow inside nested ResoureMark scope"); } void Dependencies::write_dependency_to(CompileLog* log, DepType dept, - int nargs, ciBaseObject* args[], + GrowableArray* args, Klass* witness) { - if (log == NULL) return; - assert(nargs <= max_arg_count, "oob"); - int argids[max_arg_count]; - int ctxkj = dep_context_arg(dept); // -1 if no context arg - int j; - for (j = 0; j < nargs; j++) { - if (args[j]->is_object()) { - argids[j] = log->identify(args[j]->as_object()); + if (log == NULL) { + return; + } + ResourceMark rm; + GrowableArray* argids = new GrowableArray(args->length()); + for (GrowableArrayIterator it = args->begin(); it != args->end(); ++it) { + ciBaseObject* obj = *it; + if (obj->is_object()) { + argids->push(log->identify(obj->as_object())); } else { - argids[j] = log->identify(args[j]->as_metadata()); + argids->push(log->identify(obj->as_metadata())); } } if (witness != NULL) { @@ -464,16 +475,17 @@ log->begin_elem("dependency"); } log->print(" type='%s'", 
dep_name(dept)); - if (ctxkj >= 0) { - log->print(" ctxk='%d'", argids[ctxkj]); + const int ctxkj = dep_context_arg(dept); // -1 if no context arg + if (ctxkj >= 0 && ctxkj < argids->length()) { + log->print(" ctxk='%d'", argids->at(ctxkj)); } // write remaining arguments, if any. - for (j = 0; j < nargs; j++) { + for (int j = 0; j < argids->length(); j++) { if (j == ctxkj) continue; // already logged if (j == 1) { - log->print( " x='%d'", argids[j]); + log->print( " x='%d'", argids->at(j)); } else { - log->print(" x%d='%d'", j, argids[j]); + log->print(" x%d='%d'", j, argids->at(j)); } } if (witness != NULL) { @@ -485,9 +497,12 @@ void Dependencies::write_dependency_to(xmlStream* xtty, DepType dept, - int nargs, DepArgument args[], + GrowableArray* args, Klass* witness) { - if (xtty == NULL) return; + if (xtty == NULL) { + return; + } + ResourceMark rm; ttyLocker ttyl; int ctxkj = dep_context_arg(dept); // -1 if no context arg if (witness != NULL) { @@ -497,23 +512,24 @@ } xtty->print(" type='%s'", dep_name(dept)); if (ctxkj >= 0) { - xtty->object("ctxk", args[ctxkj].metadata_value()); + xtty->object("ctxk", args->at(ctxkj).metadata_value()); } // write remaining arguments, if any. - for (int j = 0; j < nargs; j++) { + for (int j = 0; j < args->length(); j++) { if (j == ctxkj) continue; // already logged + DepArgument arg = args->at(j); if (j == 1) { - if (args[j].is_oop()) { - xtty->object("x", args[j].oop_value()); + if (arg.is_oop()) { + xtty->object("x", arg.oop_value()); } else { - xtty->object("x", args[j].metadata_value()); + xtty->object("x", arg.metadata_value()); } } else { char xn[10]; sprintf(xn, "x%d", j); - if (args[j].is_oop()) { - xtty->object(xn, args[j].oop_value()); + if (arg.is_oop()) { + xtty->object(xn, arg.oop_value()); } else { - xtty->object(xn, args[j].metadata_value()); + xtty->object(xn, arg.metadata_value()); } } } @@ -524,7 +540,7 @@ xtty->end_elem(); } -void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[], +void Dependencies::print_dependency(DepType dept, GrowableArray* args, Klass* witness) { ResourceMark rm; ttyLocker ttyl; // keep the following output all in one block @@ -533,8 +549,8 @@ dep_name(dept)); // print arguments int ctxkj = dep_context_arg(dept); // -1 if no context arg - for (int j = 0; j < nargs; j++) { - DepArgument arg = args[j]; + for (int j = 0; j < args->length(); j++) { + DepArgument arg = args->at(j); bool put_star = false; if (arg.is_null()) continue; const char* what; @@ -570,31 +586,33 @@ void Dependencies::DepStream::log_dependency(Klass* witness) { if (_deps == NULL && xtty == NULL) return; // fast cutout for runtime ResourceMark rm; - int nargs = argument_count(); - DepArgument args[max_arg_count]; + const int nargs = argument_count(); + GrowableArray* args = new GrowableArray(nargs); for (int j = 0; j < nargs; j++) { if (type() == call_site_target_value) { - args[j] = argument_oop(j); + args->push(argument_oop(j)); } else { - args[j] = argument(j); + args->push(argument(j)); } } + int argslen = args->length(); if (_deps != NULL && _deps->log() != NULL) { - Dependencies::write_dependency_to(_deps->log(), - type(), nargs, args, witness); + Dependencies::write_dependency_to(_deps->log(), type(), args, witness); } else { - Dependencies::write_dependency_to(xtty, - type(), nargs, args, witness); + Dependencies::write_dependency_to(xtty, type(), args, witness); } + guarantee(argslen == args->length(), "args array cannot grow inside nested ResoureMark scope"); } void 
Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) { + ResourceMark rm; int nargs = argument_count(); - DepArgument args[max_arg_count]; + GrowableArray* args = new GrowableArray(nargs); for (int j = 0; j < nargs; j++) { - args[j] = argument(j); + args->push(argument(j)); } - Dependencies::print_dependency(type(), nargs, args, witness); + int argslen = args->length(); + Dependencies::print_dependency(type(), args, witness); if (verbose) { if (_code != NULL) { tty->print(" code: "); @@ -602,6 +620,7 @@ tty->cr(); } } + guarantee(argslen == args->length(), "args array cannot grow inside nested ResoureMark scope"); } @@ -860,6 +879,8 @@ bool is_witness(Klass* k) { if (doing_subtype_search()) { return Dependencies::is_concrete_klass(k); + } else if (!k->oop_is_instance()) { + return false; // no methods to find in an array type } else { // Search class hierarchy first. Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature); @@ -1075,7 +1096,7 @@ Klass* chain; // scratch variable #define ADD_SUBCLASS_CHAIN(k) { \ assert(chaini < CHAINMAX, "oob"); \ - chain = InstanceKlass::cast(k)->subklass(); \ + chain = k->subklass(); \ if (chain != NULL) chains[chaini++] = chain; } // Look for non-abstract subclasses. @@ -1086,35 +1107,37 @@ // (Their subclasses are additional indirect implementors. // See InstanceKlass::add_implementor.) // (Note: nof_implementors is always zero for non-interfaces.) - int nof_impls = InstanceKlass::cast(context_type)->nof_implementors(); - if (nof_impls > 1) { - // Avoid this case: *I.m > { A.m, C }; B.m > C - // Here, I.m has 2 concrete implementations, but m appears unique - // as A.m, because the search misses B.m when checking C. - // The inherited method B.m was getting missed by the walker - // when interface 'I' was the starting point. - // %%% Until this is fixed more systematically, bail out. - // (Old CHA had the same limitation.) - return context_type; - } - if (nof_impls > 0) { - Klass* impl = InstanceKlass::cast(context_type)->implementor(); - assert(impl != NULL, "just checking"); - // If impl is the same as the context_type, then more than one - // implementor has seen. No exact info in this case. - if (impl == context_type) { - return context_type; // report an inexact witness to this sad affair + if (top_level_call) { + int nof_impls = InstanceKlass::cast(context_type)->nof_implementors(); + if (nof_impls > 1) { + // Avoid this case: *I.m > { A.m, C }; B.m > C + // Here, I.m has 2 concrete implementations, but m appears unique + // as A.m, because the search misses B.m when checking C. + // The inherited method B.m was getting missed by the walker + // when interface 'I' was the starting point. + // %%% Until this is fixed more systematically, bail out. + // (Old CHA had the same limitation.) + return context_type; } - if (do_counts) - { NOT_PRODUCT(deps_find_witness_steps++); } - if (is_participant(impl)) { - if (!participants_hide_witnesses) { + if (nof_impls > 0) { + Klass* impl = InstanceKlass::cast(context_type)->implementor(); + assert(impl != NULL, "just checking"); + // If impl is the same as the context_type, then more than one + // implementor has seen. No exact info in this case. 
+ if (impl == context_type) { + return context_type; // report an inexact witness to this sad affair + } + if (do_counts) + { NOT_PRODUCT(deps_find_witness_steps++); } + if (is_participant(impl)) { + if (!participants_hide_witnesses) { + ADD_SUBCLASS_CHAIN(impl); + } + } else if (is_witness(impl) && !ignore_witness(impl)) { + return impl; + } else { ADD_SUBCLASS_CHAIN(impl); } - } else if (is_witness(impl) && !ignore_witness(impl)) { - return impl; - } else { - ADD_SUBCLASS_CHAIN(impl); } } --- ./hotspot/src/share/vm/code/dependencies.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/dependencies.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -366,20 +366,36 @@ void copy_to(nmethod* nm); void log_all_dependencies(); - void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) { - write_dependency_to(log(), dept, nargs, args); + + void log_dependency(DepType dept, GrowableArray* args) { + ResourceMark rm; + int argslen = args->length(); + write_dependency_to(log(), dept, args); + guarantee(argslen == args->length(), + "args array cannot grow inside nested ResoureMark scope"); } + void log_dependency(DepType dept, ciBaseObject* x0, ciBaseObject* x1 = NULL, ciBaseObject* x2 = NULL) { - if (log() == NULL) return; - ciBaseObject* args[max_arg_count]; - args[0] = x0; - args[1] = x1; - args[2] = x2; - assert(2 < max_arg_count, ""); - log_dependency(dept, dep_args(dept), args); + if (log() == NULL) { + return; + } + ResourceMark rm; + GrowableArray* ciargs = + new GrowableArray(dep_args(dept)); + assert (x0 != NULL, "no log x0"); + ciargs->push(x0); + + if (x1 != NULL) { + ciargs->push(x1); + } + if (x2 != NULL) { + ciargs->push(x2); + } + assert(ciargs->length() == dep_args(dept), ""); + log_dependency(dept, ciargs); } class DepArgument : public ResourceObj { @@ -402,20 +418,8 @@ Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; } }; - static void write_dependency_to(CompileLog* log, - DepType dept, - int nargs, ciBaseObject* args[], - Klass* witness = NULL); - static void write_dependency_to(CompileLog* log, - DepType dept, - int nargs, DepArgument args[], - Klass* witness = NULL); - static void write_dependency_to(xmlStream* xtty, - DepType dept, - int nargs, DepArgument args[], - Klass* witness = NULL); static void print_dependency(DepType dept, - int nargs, DepArgument args[], + GrowableArray* args, Klass* witness = NULL); private: @@ -424,6 +428,18 @@ static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x); + static void write_dependency_to(CompileLog* log, + DepType dept, + GrowableArray* args, + Klass* witness = NULL); + static void write_dependency_to(CompileLog* log, + DepType dept, + GrowableArray* args, + Klass* witness = NULL); + static void write_dependency_to(xmlStream* xtty, + DepType dept, + GrowableArray* args, + Klass* witness = NULL); public: // Use this to iterate over an nmethod's dependency set. // Works on new and old dependency sets. 
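Stepping back from the dependencies.cpp/hpp hunks: the fixed DepArgument/ciBaseObject arrays sized by max_arg_count are replaced with resource-allocated GrowableArrays, and every conversion is bracketed by a ResourceMark plus a length guarantee. The guarantee exists because growing the array inside the nested resource scope would reallocate its backing store there and leave it dangling once the scope unwinds. A standalone analogue, with std::vector standing in for GrowableArray and a local scope standing in for the ResourceMark region:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // The writer callback plays the role of write_dependency_to(); it must
    // not append to args while the nested resource scope is active.
    void log_with_guarantee(std::vector<int>& args,
                            void (*write)(const std::vector<int>&)) {
      const std::size_t before = args.size();
      write(args);
      assert(before == args.size() &&
             "args array cannot grow inside nested ResourceMark scope");
    }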
--- ./hotspot/src/share/vm/code/nmethod.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/nmethod.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -37,6 +37,7 @@ #include "oops/methodData.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/jvmtiImpl.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" #include "utilities/dtrace.hpp" @@ -48,6 +49,8 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +unsigned char nmethod::_global_unloading_clock = 0; + #ifdef DTRACE_ENABLED // Only bother with this argument setup if dtrace is available @@ -383,27 +386,30 @@ set_exception_cache(new_entry); } -void nmethod::remove_from_exception_cache(ExceptionCache* ec) { +void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) { ExceptionCache* prev = NULL; ExceptionCache* curr = exception_cache(); - assert(curr != NULL, "nothing to remove"); - // find the previous and next entry of ec - while (curr != ec) { - prev = curr; - curr = curr->next(); - assert(curr != NULL, "ExceptionCache not found"); + + while (curr != NULL) { + ExceptionCache* next = curr->next(); + + Klass* ex_klass = curr->exception_type(); + if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) { + if (prev == NULL) { + set_exception_cache(next); + } else { + prev->set_next(next); + } + delete curr; + // prev stays the same. + } else { + prev = curr; + } + + curr = next; } - // now: curr == ec - ExceptionCache* next = curr->next(); - if (prev == NULL) { - set_exception_cache(next); - } else { - prev->set_next(next); - } - delete curr; } - // public method for accessing the exception cache // These are the public access methods. address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { @@ -462,6 +468,7 @@ // Fill in default values for various flag fields void nmethod::init_defaults() { _state = in_use; + _unloading_clock = 0; _marked_for_reclamation = 0; _has_flushed_dependencies = 0; _has_unsafe_access = 0; @@ -480,7 +487,11 @@ _oops_do_mark_link = NULL; _jmethod_id = NULL; _osr_link = NULL; - _scavenge_root_link = NULL; + if (UseG1GC) { + _unloading_next = NULL; + } else { + _scavenge_root_link = NULL; + } _scavenge_root_state = 0; _compiler = NULL; #if INCLUDE_RTM_OPT @@ -688,8 +699,10 @@ _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); @@ -773,8 +786,10 @@ _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } DEBUG_ONLY(verify_scavenge_root_oops();) @@ -889,8 +904,10 @@ code_buffer->copy_values_to(this); debug_info->copy_to(this); dependencies->copy_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); @@ -1156,7 
+1173,7 @@ switch(iter.type()) { case relocInfo::virtual_call_type: case relocInfo::opt_virtual_call_type: { - CompiledIC *ic = CompiledIC_at(iter.reloc()); + CompiledIC *ic = CompiledIC_at(&iter); // Ok, to lookup references to zombies here CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); if( cb != NULL && cb->is_nmethod() ) { @@ -1180,6 +1197,77 @@ } } +void nmethod::verify_clean_inline_caches() { + assert_locked_or_safepoint(CompiledIC_lock); + + // If the method is not entrant or zombie then a JMP is plastered over the + // first few bytes. If an oop in the old code was there, that oop + // should not get GC'd. Skip the first few bytes of oops on + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (!is_in_use()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // This means that the low_boundary is going to be a little too high. + // This shouldn't matter, since oops of non-entrant methods are never used. + // In fact, why are we bothering to look at oops in a non-entrant method?? + } + + ResourceMark rm; + RelocIterator iter(this, low_boundary); + while(iter.next()) { + switch(iter.type()) { + case relocInfo::virtual_call_type: + case relocInfo::opt_virtual_call_type: { + CompiledIC *ic = CompiledIC_at(&iter); + // Ok, to lookup references to zombies here + CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); + if( cb != NULL && cb->is_nmethod() ) { + nmethod* nm = (nmethod*)cb; + // Verify that inline caches pointing to both zombie and not_entrant methods are clean + if (!nm->is_in_use() || (nm->method()->code() != nm)) { + assert(ic->is_clean(), "IC should be clean"); + } + } + break; + } + case relocInfo::static_call_type: { + CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc()); + CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination()); + if( cb != NULL && cb->is_nmethod() ) { + nmethod* nm = (nmethod*)cb; + // Verify that inline caches pointing to both zombie and not_entrant methods are clean + if (!nm->is_in_use() || (nm->method()->code() != nm)) { + assert(csc->is_clean(), "IC should be clean"); + } + } + break; + } + } + } +} + +int nmethod::verify_icholder_relocations() { + int count = 0; + + RelocIterator iter(this); + while(iter.next()) { + if (iter.type() == relocInfo::virtual_call_type) { + if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) { + CompiledIC *ic = CompiledIC_at(&iter); + if (TraceCompiledIC) { + tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder())); + ic->print(); + } + assert(ic->cached_icholder() != NULL, "must be non-NULL"); + count++; + } + } + } + + return count; +} + // This is a private interface with the sweeper. void nmethod::mark_as_seen_on_stack() { assert(is_alive(), "Must be an alive method"); @@ -1212,6 +1300,23 @@ mdo->inc_decompile_count(); } +void nmethod::increase_unloading_clock() { + _global_unloading_clock++; + if (_global_unloading_clock == 0) { + // _nmethods are allocated with _unloading_clock == 0, + // so 0 is never used as a clock value. 
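The unloading clock introduced above gives parallel unloading workers a cheap "already processed this cycle" test. A reduced model using C++11 atomics in place of HotSpot's OrderAccess release_store/load_acquire pair (type names are illustrative):

    #include <atomic>

    // Global cycle counter; stand-in for nmethod::_global_unloading_clock.
    static std::atomic<unsigned char> g_clock(1);

    struct NMethodStub { std::atomic<unsigned char> unloading_clock{0}; };

    // One bump per unloading pass; 0 is reserved for never-processed
    // nmethods, exactly as increase_unloading_clock() skips it.
    void new_unloading_cycle() {
      unsigned char next = (unsigned char)(g_clock.load() + 1);
      if (next == 0) next = 1;
      g_clock.store(next);
    }

    // load_acquire in HotSpot; pairs with the release store below.
    bool already_processed(const NMethodStub& nm) {
      return nm.unloading_clock.load(std::memory_order_acquire) == g_clock.load();
    }

    void mark_processed(NMethodStub& nm) {
      nm.unloading_clock.store(g_clock.load(), std::memory_order_release);
    }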
+ _global_unloading_clock = 1; + } +} + +void nmethod::set_unloading_clock(unsigned char unloading_clock) { + OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock); +} + +unsigned char nmethod::unloading_clock() { + return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock); +} + void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) { post_compiled_method_unload(); @@ -1257,6 +1362,10 @@ // for later on. CodeCache::set_needs_cache_clean(true); } + + // Unregister must be done before the state change + Universe::heap()->unregister_nmethod(this); + _state = unloaded; // Log the unloading. @@ -1611,6 +1720,45 @@ set_unload_reported(); } +void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) { + if (ic->is_icholder_call()) { + // The only exception is compiledICHolder oops which may + // yet be marked below. (We check this further below). + CompiledICHolder* cichk_oop = ic->cached_icholder(); + + if (mark_on_stack) { + Metadata::mark_on_stack(cichk_oop->holder_method()); + Metadata::mark_on_stack(cichk_oop->holder_klass()); + } + + if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) && + cichk_oop->holder_klass()->is_loader_alive(is_alive)) { + return; + } + } else { + Metadata* ic_oop = ic->cached_metadata(); + if (ic_oop != NULL) { + if (mark_on_stack) { + Metadata::mark_on_stack(ic_oop); + } + + if (ic_oop->is_klass()) { + if (((Klass*)ic_oop)->is_loader_alive(is_alive)) { + return; + } + } else if (ic_oop->is_method()) { + if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) { + return; + } + } else { + ShouldNotReachHere(); + } + } + } + + ic->set_to_clean(); +} + // This is called at the end of the strong tracing/marking phase of a // GC to unload an nmethod if it contains otherwise unreachable // oops. @@ -1643,15 +1791,7 @@ } // Exception cache - ExceptionCache* ec = exception_cache(); - while (ec != NULL) { - Klass* ex_klass = ec->exception_type(); - ExceptionCache* next_ec = ec->next(); - if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) { - remove_from_exception_cache(ec); - } - ec = next_ec; - } + clean_exception_cache(is_alive); // If class unloading occurred we first iterate over all inline caches and // clear ICs where the cached oop is referring to an unloaded klass or method. @@ -1661,32 +1801,8 @@ RelocIterator iter(this, low_boundary); while(iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { - CompiledIC *ic = CompiledIC_at(iter.reloc()); - if (ic->is_icholder_call()) { - // The only exception is compiledICHolder oops which may - // yet be marked below. (We check this further below). 
- CompiledICHolder* cichk_oop = ic->cached_icholder(); - if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) && - cichk_oop->holder_klass()->is_loader_alive(is_alive)) { - continue; - } - } else { - Metadata* ic_oop = ic->cached_metadata(); - if (ic_oop != NULL) { - if (ic_oop->is_klass()) { - if (((Klass*)ic_oop)->is_loader_alive(is_alive)) { - continue; - } - } else if (ic_oop->is_method()) { - if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) { - continue; - } - } else { - ShouldNotReachHere(); - } - } - } - ic->set_to_clean(); + CompiledIC *ic = CompiledIC_at(&iter); + clean_ic_if_metadata_is_dead(ic, is_alive, false); } } } @@ -1724,6 +1840,224 @@ verify_metadata_loaders(low_boundary, is_alive); } +template <class CompiledICorStaticCall> +static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) { + // Ok, to lookup references to zombies here + CodeBlob *cb = CodeCache::find_blob_unsafe(addr); + if (cb != NULL && cb->is_nmethod()) { + nmethod* nm = (nmethod*)cb; + + if (nm->unloading_clock() != nmethod::global_unloading_clock()) { + // The nmethod has not been processed yet. + return true; + } + + // Clean inline caches pointing to both zombie and not_entrant methods + if (!nm->is_in_use() || (nm->method()->code() != nm)) { + ic->set_to_clean(); + assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string())); + } + } + + return false; +} + +static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) { + return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from); +} + +static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) { + return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from); +} + +bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) { + assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type"); + + oop_Relocation* r = iter_at_oop->oop_reloc(); + // Traverse those oops directly embedded in the code. + // Other oops (oop_index>0) are seen as part of scopes_oops. + assert(1 == (r->oop_is_immediate()) + + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), + "oop must be found in exactly one place"); + if (r->oop_is_immediate() && r->oop_value() != NULL) { + // Unload this nmethod if the oop is dead. + if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) { + return true; + } + } + + return false; +} + +void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) { + assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type"); + + metadata_Relocation* r = iter_at_metadata->metadata_reloc(); + // In this metadata, we must only follow those metadatas directly embedded in + // the code. Other metadatas (oop_index>0) are seen as part of + // the metadata section below. 
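clean_ic_if_metadata_is_dead(), factored out earlier in this file's hunks, boils down to one liveness question per cached value (for a CompiledICHolder, both the holder method's class and the holder klass must pass). In outline, with a stub type in place of HotSpot's Metadata:

    struct MetadataStub { bool loader_alive; };  // stand-in, illustrative

    // Returns true when an inline cache may keep its cached metadata; on a
    // false result the caller resets the site, as the helper does via
    // ic->set_to_clean().
    bool cache_may_survive(const MetadataStub* cached) {
      if (cached == nullptr) return true;  // nothing cached, nothing to clean
      return cached->loader_alive;         // the is_loader_alive(is_alive) test
    }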
+ assert(1 == (r->metadata_is_immediate()) + + (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()), + "metadata must be found in exactly one place"); + if (r->metadata_is_immediate() && r->metadata_value() != NULL) { + Metadata* md = r->metadata_value(); + if (md != _method) Metadata::mark_on_stack(md); + } +} + +void nmethod::mark_metadata_on_stack_non_relocs() { + // Visit the metadata section + for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { + if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops + Metadata* md = *p; + Metadata::mark_on_stack(md); + } + + // Visit metadata not embedded in the other places. + if (_method != NULL) Metadata::mark_on_stack(_method); +} + +bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) { + ResourceMark rm; + + // Make sure the oop's ready to receive visitors + assert(!is_zombie() && !is_unloaded(), + "should not call follow on zombie or unloaded nmethod"); + + // If the method is not entrant then a JMP is plastered over the + // first few bytes. If an oop in the old code was there, that oop + // should not get GC'd. Skip the first few bytes of oops on + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } + + // The RedefineClasses() API can cause the class unloading invariant + // to no longer be true. See jvmtiExport.hpp for details. + // Also, leave a debugging breadcrumb in local flag. + bool a_class_was_redefined = JvmtiExport::has_redefined_a_class(); + if (a_class_was_redefined) { + // This set of the unloading_occurred flag is done before the + // call to post_compiled_method_unload() so that the unloading + // of this nmethod is reported. + unloading_occurred = true; + } + + // When class redefinition is used all metadata in the CodeCache has to be recorded, + // so that unused "previous versions" can be purged. Since walking the CodeCache can + // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code. + bool mark_metadata_on_stack = a_class_was_redefined; + + // Exception cache + clean_exception_cache(is_alive); + + bool is_unloaded = false; + bool postponed = false; + + RelocIterator iter(this, low_boundary); + while(iter.next()) { + + switch (iter.type()) { + + case relocInfo::virtual_call_type: + if (unloading_occurred) { + // If class unloading occurred we first iterate over all inline caches and + // clear ICs where the cached oop is referring to an unloaded klass or method. 
+ clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack); + } + + postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this); + break; + + case relocInfo::opt_virtual_call_type: + postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this); + break; + + case relocInfo::static_call_type: + postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this); + break; + + case relocInfo::oop_type: + if (!is_unloaded) { + is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred); + } + break; + + case relocInfo::metadata_type: + if (mark_metadata_on_stack) { + mark_metadata_on_stack_at(&iter); + } + } + } + + if (mark_metadata_on_stack) { + mark_metadata_on_stack_non_relocs(); + } + + if (is_unloaded) { + return postponed; + } + + // Scopes + for (oop* p = oops_begin(); p < oops_end(); p++) { + if (*p == Universe::non_oop_word()) continue; // skip non-oops + if (can_unload(is_alive, p, unloading_occurred)) { + is_unloaded = true; + break; + } + } + + if (is_unloaded) { + return postponed; + } + + // Ensure that all metadata is still alive + verify_metadata_loaders(low_boundary, is_alive); + + return postponed; +} + +void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) { + ResourceMark rm; + + // Make sure the oop's ready to receive visitors + assert(!is_zombie(), + "should not call follow on zombie nmethod"); + + // If the method is not entrant then a JMP is plastered over the + // first few bytes. If an oop in the old code was there, that oop + // should not get GC'd. Skip the first few bytes of oops on + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } + + RelocIterator iter(this, low_boundary); + while(iter.next()) { + + switch (iter.type()) { + + case relocInfo::virtual_call_type: + clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this); + break; + + case relocInfo::opt_virtual_call_type: + clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this); + break; + + case relocInfo::static_call_type: + clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this); + break; + } + } +} + #ifdef ASSERT class CheckClass : AllStatic { @@ -1770,7 +2104,7 @@ // compiled code is maintaining a link to dead metadata. address static_call_addr = NULL; if (iter.type() == relocInfo::opt_virtual_call_type) { - CompiledIC* cic = CompiledIC_at(iter.reloc()); + CompiledIC* cic = CompiledIC_at(&iter); if (!cic->is_call_to_interpreted()) { static_call_addr = iter.addr(); } @@ -1810,7 +2144,7 @@ while (iter.next()) { if (iter.type() == relocInfo::metadata_type ) { metadata_Relocation* r = iter.metadata_reloc(); - // In this lmetadata, we must only follow those metadatas directly embedded in + // In this metadata, we must only follow those metadatas directly embedded in // the code. Other metadatas (oop_index>0) are seen as part of // the metadata section below. 
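A self-contained toy model of the two-pass protocol that do_unloading_parallel() and do_unloading_parallel_postponed() above implement (plain C++, invented names, not HotSpot code): pass one postpones inline caches whose target nmethod has not yet been stamped with the current clock, and pass two decides them once every stamp is current.

#include <cstdio>
#include <vector>

struct NM { unsigned char clock; bool in_use; };
static unsigned char global_clock = 1;

// Returns true when judgement must be postponed: the target has not been
// stamped with the current clock, so its state is not yet trustworthy.
static bool clean_ic_or_postpone(NM* target) {
  if (target->clock != global_clock) return true;
  if (!target->in_use) { /* ic->set_to_clean() would happen here */ }
  return false;
}

int main() {
  std::vector<NM> code_cache = { {0, true}, {0, false} };
  bool postponed = false;
  for (NM& nm : code_cache) {          // pass 1 (parallel in HotSpot)
    postponed |= clean_ic_or_postpone(&code_cache[1]);
    nm.clock = global_clock;           // stamp as processed
  }
  if (postponed) {                     // pass 2: every stamp is current now
    clean_ic_or_postpone(&code_cache[1]);
  }
  std::printf("postponed work existed: %d\n", (int)postponed);
  return 0;
}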
assert(1 == (r->metadata_is_immediate()) + @@ -1822,7 +2156,7 @@ } } else if (iter.type() == relocInfo::virtual_call_type) { // Check compiledIC holders associated with this nmethod - CompiledIC *ic = CompiledIC_at(iter.reloc()); + CompiledIC *ic = CompiledIC_at(&iter); if (ic->is_icholder_call()) { CompiledICHolder* cichk = ic->cached_icholder(); f(cichk->holder_method()); @@ -1844,7 +2178,7 @@ f(md); } - // Call function Method*, not embedded in these other places. + // Visit metadata not embedded in the other places. if (_method != NULL) f(_method); } @@ -1940,7 +2274,7 @@ assert(cur != NULL, "not NULL-terminated"); nmethod* next = cur->_oops_do_mark_link; cur->_oops_do_mark_link = NULL; - cur->fix_oop_relocations(); + cur->verify_oop_relocations(); NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark")); cur = next; } @@ -2482,6 +2816,10 @@ }; void nmethod::verify_scavenge_root_oops() { + if (UseG1GC) { + return; + } + if (!on_scavenge_root_list()) { // Actually look inside, to verify the claim that it's clean. DebugScavengeRoot debug_scavenge_root(this); @@ -2925,7 +3263,7 @@ case relocInfo::virtual_call_type: case relocInfo::opt_virtual_call_type: { VerifyMutexLocker mc(CompiledIC_lock); - CompiledIC_at(iter.reloc())->print(); + CompiledIC_at(&iter)->print(); break; } case relocInfo::static_call_type: --- ./hotspot/src/share/vm/code/nmethod.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/code/nmethod.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,7 +69,12 @@ friend class VMStructs; private: enum { cache_size = 4 }; - PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found + // The array elements MUST be volatile! Several threads may modify + // and read from the cache concurrently. find_pc_desc_internal has + // returned wrong results. C++ compiler (namely xlC12) may duplicate + // C++ field accesses if the elements are not volatile. + typedef PcDesc* PcDescPtr; + volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found public: PcDescCache() { debug_only(_pc_descs[0] = NULL); } void reset_to(PcDesc* initial_pc_desc); @@ -111,6 +116,11 @@ friend class NMethodSweeper; friend class CodeCache; // scavengable oops private: + + // GC support to help figure out if an nmethod has been + // cleaned/unloaded by the current GC. + static unsigned char _global_unloading_clock; + // Shared fields for all nmethod's Method* _method; int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method @@ -118,7 +128,13 @@ // To support simple linked-list chaining of nmethods: nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head - nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods + + union { + // Used by G1 to chain nmethods. + nmethod* _unloading_next; + // Used by non-G1 GCs to chain nmethods. + nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods + }; static nmethod* volatile _oops_do_mark_nmethods; nmethod* volatile _oops_do_mark_link; @@ -180,6 +196,8 @@ // Protected by Patching_lock volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded} + volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod + #ifdef ASSERT bool _oops_are_stale; // indicates that it's no longer safe to access oops section #endif @@ -430,13 +448,25 @@ // alive. It is used when an uncommon trap happens. Returns true // if this thread changed the state of the nmethod or false if // another thread performed the transition. 
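The volatile qualifier added to _pc_descs above guards against a documented miscompilation: a compiler (xlC12, per the comment) duplicated the field load, so the NULL check and the subsequent use could observe different values. A minimal standalone illustration of the safe read pattern, with stand-in types (nothing below is HotSpot code):

#include <cstddef>

struct PcDescLike { int pc_offset; };
typedef PcDescLike* PcDescPtr;

// Read each slot exactly once into a local, then test only that local.
// With non-volatile slots, a compiler may legally re-load slots[i] after
// the NULL check, and a concurrent writer can change the slot in between.
PcDescLike* cache_find(volatile PcDescPtr* slots, int n, int offset) {
  for (int i = 0; i < n; i++) {
    PcDescLike* d = slots[i];                  // single volatile load
    if (d != NULL && d->pc_offset == offset) { // use the snapshot only
      return d;
    }
  }
  return NULL;
}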
- bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); } + bool make_not_entrant() { + assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant"); + return make_not_entrant_or_zombie(not_entrant); + } bool make_zombie() { return make_not_entrant_or_zombie(zombie); } // used by jvmti to track if the unload event has been reported bool unload_reported() { return _unload_reported; } void set_unload_reported() { _unload_reported = true; } + void set_unloading_next(nmethod* next) { _unloading_next = next; } + nmethod* unloading_next() { return _unloading_next; } + + static unsigned char global_unloading_clock() { return _global_unloading_clock; } + static void increase_unloading_clock(); + + void set_unloading_clock(unsigned char unloading_clock); + unsigned char unloading_clock(); + bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; } void mark_for_deoptimization() { _marked_for_deoptimization = true; } @@ -529,7 +559,7 @@ void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } address handler_for_exception_and_pc(Handle exception, address pc); void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); - void remove_from_exception_cache(ExceptionCache* ec); + void clean_exception_cache(BoolObjectClosure* is_alive); // implicit exceptions support address continuation_for_implicit_exception(address pc); @@ -552,6 +582,10 @@ return (addr >= code_begin() && addr < verified_entry_point()); } + // Verify calls to dead methods have been cleaned. + void verify_clean_inline_caches(); + // Verify and count cached icholder relocations. + int verify_icholder_relocations(); // Check that all metadata is still alive void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive); @@ -577,8 +611,19 @@ // GC support void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); + // The parallel versions are used by G1. + bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred); + void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred); + + private: + // Unload a nmethod if the *root object is dead. 
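The clock interface declared above (global_unloading_clock / increase_unloading_clock / set_unloading_clock) composes into a simple handshake: bump the global clock to open a cycle, stamp each nmethod after cleaning it, and treat any stale stamp as "still needs processing". A standalone sketch of that handshake, recast with std::atomic in place of the OrderAccess calls used earlier in this patch (all names are invented):

#include <atomic>
#include <cstdio>

static unsigned char global_clock = 0;

static void start_unloading_cycle() {
  if (++global_clock == 0) global_clock = 1;  // 0 is reserved: never cleaned
}

struct FakeNMethod {
  std::atomic<unsigned char> clock;
  FakeNMethod() : clock(0) {}
  void stamp_cleaned() {                      // after all cleaning is done
    clock.store(global_clock, std::memory_order_release);
  }
  bool needs_processing() const {
    return clock.load(std::memory_order_acquire) != global_clock;
  }
};

int main() {
  FakeNMethod nm;
  start_unloading_cycle();
  std::printf("%d\n", (int)nm.needs_processing());  // 1: clean me this cycle
  nm.stamp_cleaned();                               // worker is done
  std::printf("%d\n", (int)nm.needs_processing());  // 0: already processed
  return 0;
}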
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
+  void mark_metadata_on_stack_non_relocs();
+
+ public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
--- ./hotspot/src/share/vm/code/relocInfo.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/code/relocInfo.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -877,11 +877,7 @@
 void internal_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
   address target = _target;
   if (target == NULL) {
-    if (addr_in_const()) {
-      target = new_addr_for(*(address*)addr(), src, dest);
-    } else {
-      target = new_addr_for(pd_get_address_from_code(), src, dest);
-    }
+    target = new_addr_for(this->target(), src, dest);
   }
   set_value(target);
 }
@@ -890,7 +886,11 @@
 address internal_word_Relocation::target() {
   address target = _target;
   if (target == NULL) {
-    target = pd_get_address_from_code();
+    if (addr_in_const()) {
+      target = *(address*)addr();
+    } else {
+      target = pd_get_address_from_code();
+    }
   }
   return target;
 }
--- ./hotspot/src/share/vm/compiler/compileBroker.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -183,9 +183,8 @@
 long CompileBroker::_peak_compilation_time       = 0;
 
-CompileQueue* CompileBroker::_c2_method_queue    = NULL;
-CompileQueue* CompileBroker::_c1_method_queue    = NULL;
-CompileTask*  CompileBroker::_task_free_list     = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
@@ -253,13 +252,56 @@
     // By convention, the compiling thread is responsible for
     // recycling a non-blocking CompileTask.
-    CompileBroker::free_task(task);
+    CompileTask::free(task);
   }
 }
 
-// ------------------------------------------------------------------
-// CompileTask::initialize
+CompileTask*  CompileTask::_task_free_list = NULL;
+#ifdef ASSERT
+int CompileTask::_num_allocated_tasks = 0;
+#endif
+/**
+ * Allocate a CompileTask, from the free list if possible.
+ */
+CompileTask* CompileTask::allocate() {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  CompileTask* task = NULL;
+
+  if (_task_free_list != NULL) {
+    task = _task_free_list;
+    _task_free_list = task->next();
+    task->set_next(NULL);
+  } else {
+    task = new CompileTask();
+    DEBUG_ONLY(_num_allocated_tasks++;)
+    assert (_num_allocated_tasks < 10000, "Leaking compilation tasks?");
+    task->set_next(NULL);
+    task->set_is_free(true);
+  }
+  assert(task->is_free(), "Task must be free.");
+  task->set_is_free(false);
+  return task;
+}
+
+
+/**
+ * Add a task to the free list.
+ */ +void CompileTask::free(CompileTask* task) { + MutexLocker locker(CompileTaskAlloc_lock); + if (!task->is_free()) { + task->set_code(NULL); + assert(!task->lock()->is_locked(), "Should not be locked when freed"); + JNIHandles::destroy_global(task->_method_holder); + JNIHandles::destroy_global(task->_hot_method_holder); + + task->set_is_free(true); + task->set_next(_task_free_list); + _task_free_list = task; + } +} + void CompileTask::initialize(int compile_id, methodHandle method, int osr_bci, @@ -287,6 +329,7 @@ _hot_count = hot_count; _time_queued = 0; // tidy _comment = comment; + _failure_reason = NULL; if (LogCompilation) { _time_queued = os::elapsed_counter(); @@ -317,15 +360,6 @@ if (nm == NULL) _code_handle = NULL; // drop the handle also } -// ------------------------------------------------------------------ -// CompileTask::free -void CompileTask::free() { - set_code(NULL); - assert(!_lock->is_locked(), "Should not be locked when freed"); - JNIHandles::destroy_global(_method_holder); - JNIHandles::destroy_global(_hot_method_holder); -} - void CompileTask::mark_on_stack() { // Mark these methods as something redefine classes cannot remove. @@ -565,6 +599,11 @@ methodHandle method(thread, this->method()); ResourceMark rm(thread); + if (!_is_success) { + const char* reason = _failure_reason != NULL ? _failure_reason : "unknown"; + log->elem("failure reason='%s'", reason); + } + // nmethod* nm = code(); log->begin_elem("task_done success='%d' nmsize='%d' count='%d'", @@ -588,9 +627,12 @@ -// Add a CompileTask to a CompileQueue +/** + * Add a CompileTask to a CompileQueue + */ void CompileQueue::add(CompileTask* task) { assert(lock()->owned_by_self(), "must own lock"); + assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever"); task->set_next(NULL); task->set_prev(NULL); @@ -612,9 +654,7 @@ // Mark the method as being in the compile queue. task->method()->set_queued_for_compilation(); - if (CIPrintCompileQueue) { - print(); - } + NOT_PRODUCT(print();) if (LogCompilation && xtty != NULL) { task->log_task_queued(); @@ -624,14 +664,32 @@ lock()->notify_all(); } -void CompileQueue::delete_all() { - assert(lock()->owned_by_self(), "must own lock"); - if (_first != NULL) { - for (CompileTask* task = _first; task != NULL; task = task->next()) { - delete task; +/** + * Empties compilation queue by putting all compilation tasks onto + * a freelist. Furthermore, the method wakes up all threads that are + * waiting on a compilation task to finish. This can happen if background + * compilation is disabled. + */ +void CompileQueue::free_all() { + MutexLocker mu(lock()); + CompileTask* next = _first; + + // Iterate over all tasks in the compile queue + while (next != NULL) { + CompileTask* current = next; + next = current->next(); + { + // Wake up thread that blocks on the compile task. + MutexLocker ct_lock(current->lock()); + current->lock()->notify(); } - _first = NULL; + // Put the task back on the freelist. + CompileTask::free(current); } + _first = NULL; + + // Wake up all threads that block on the queue. 
+ lock()->notify_all(); } // ------------------------------------------------------------------ @@ -688,13 +746,40 @@ return NULL; } - CompileTask* task = CompilationPolicy::policy()->select_task(this); + CompileTask* task; + { + No_Safepoint_Verifier nsv; + task = CompilationPolicy::policy()->select_task(this); + } remove(task); + purge_stale_tasks(); // may temporarily release MCQ lock return task; } -void CompileQueue::remove(CompileTask* task) -{ +// Clean & deallocate stale compile tasks. +// Temporarily releases MethodCompileQueue lock. +void CompileQueue::purge_stale_tasks() { + assert(lock()->owned_by_self(), "must own lock"); + if (_first_stale != NULL) { + // Stale tasks are purged when MCQ lock is released, + // but _first_stale updates are protected by MCQ lock. + // Once task processing starts and MCQ lock is released, + // other compiler threads can reuse _first_stale. + CompileTask* head = _first_stale; + _first_stale = NULL; + { + MutexUnlocker ul(lock()); + for (CompileTask* task = head; task != NULL; ) { + CompileTask* next_task = task->next(); + CompileTaskWrapper ctw(task); // Frees the task + task->set_failure_reason("stale task"); + task = next_task; + } + } + } +} + +void CompileQueue::remove(CompileTask* task) { assert(lock()->owned_by_self(), "must own lock"); if (task->prev() != NULL) { task->prev()->set_next(task->next()); @@ -714,6 +799,16 @@ --_size; } +void CompileQueue::remove_and_mark_stale(CompileTask* task) { + assert(lock()->owned_by_self(), "must own lock"); + remove(task); + + // Enqueue the task for reclamation (should be done outside MCQ lock) + task->set_next(_first_stale); + task->set_prev(NULL); + _first_stale = task; +} + // methods in the compile queue need to be marked as used on the stack // so that they don't get reclaimed by Redefine Classes void CompileQueue::mark_on_stack() { @@ -724,18 +819,24 @@ } } -// ------------------------------------------------------------------ -// CompileQueue::print +#ifndef PRODUCT +/** + * Print entire compilation queue. 
+ */ void CompileQueue::print() { - tty->print_cr("Contents of %s", name()); - tty->print_cr("----------------------"); - CompileTask* task = _first; - while (task != NULL) { - task->print_line(); - task = task->next(); + if (CIPrintCompileQueue) { + ttyLocker ttyl; + tty->print_cr("Contents of %s", name()); + tty->print_cr("----------------------"); + CompileTask* task = _first; + while (task != NULL) { + task->print_line(); + task = task->next(); + } + tty->print_cr("----------------------"); } - tty->print_cr("----------------------"); } +#endif // PRODUCT CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) { @@ -808,9 +909,6 @@ _compilers[1] = new SharkCompiler(); #endif // SHARK - // Initialize the CompileTask free list - _task_free_list = NULL; - // Start the CompilerThreads init_compiler_threads(c1_count, c2_count); // totalTime performance counter is always created as it is required @@ -1003,11 +1101,11 @@ #endif // !ZERO && !SHARK // Initialize the compilation queue if (c2_compiler_count > 0) { - _c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock); + _c2_compile_queue = new CompileQueue("C2 CompileQueue", MethodCompileQueue_lock); _compilers[1]->set_num_compiler_threads(c2_compiler_count); } if (c1_compiler_count > 0) { - _c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock); + _c1_compile_queue = new CompileQueue("C1 CompileQueue", MethodCompileQueue_lock); _compilers[0]->set_num_compiler_threads(c1_compiler_count); } @@ -1022,7 +1120,7 @@ sprintf(name_buffer, "C2 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); // Shark and C2 - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK); + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK); _compiler_threads->append(new_thread); } @@ -1031,7 +1129,7 @@ sprintf(name_buffer, "C1 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); // C1 - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK); + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK); _compiler_threads->append(new_thread); } @@ -1041,14 +1139,19 @@ } -// Set the methods on the stack as on_stack so that redefine classes doesn't -// reclaim them +/** + * Set the methods on the stack as on_stack so that redefine classes doesn't + * reclaim them. This method is executed at a safepoint. + */ void CompileBroker::mark_on_stack() { - if (_c2_method_queue != NULL) { - _c2_method_queue->mark_on_stack(); + assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); + // Since we are at a safepoint, we do not need a lock to access + // the compile queues. + if (_c2_compile_queue != NULL) { + _c2_compile_queue->mark_on_stack(); } - if (_c1_method_queue != NULL) { - _c1_method_queue->mark_on_stack(); + if (_c1_compile_queue != NULL) { + _c1_compile_queue->mark_on_stack(); } } @@ -1064,7 +1167,7 @@ const char* comment, Thread* thread) { // do nothing if compiler thread(s) is not available - if (!_initialized ) { + if (!_initialized) { return; } @@ -1111,7 +1214,7 @@ // If this method is already in the compile queue, then // we do not block the current thread. 
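free_all() above wakes every thread blocked on a queued task, and wait_for_completion(), further down in this file, now loops on "complete or compilation disabled forever", so those wakeups cleanly end the wait instead of leaving requesters blocked. A standalone model of that shutdown handshake (std::condition_variable standing in for the Monitor; all names invented):

#include <condition_variable>
#include <mutex>
#include <thread>

static std::mutex              m;
static std::condition_variable cv;
static bool complete         = false;
static bool disabled_forever = false;

// Requester side: mirrors the new wait loop in wait_for_completion().
void wait_for_task() {
  std::unique_lock<std::mutex> l(m);
  cv.wait(l, [] { return complete || disabled_forever; });
}

int main() {
  std::thread waiter(wait_for_task);
  {
    std::lock_guard<std::mutex> g(m);
    disabled_forever = true;     // what shutdown does instead of completing
  }
  cv.notify_all();               // free_all()/shutdown wakes every waiter
  waiter.join();
  return 0;
}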
- if (compilation_is_in_queue(method, osr_bci)) { + if (compilation_is_in_queue(method)) { // We may want to decay our counter a bit here to prevent // multiple denied requests for compilation. This is an // open compilation policy issue. Note: The other possibility, @@ -1132,6 +1235,12 @@ return; } + if (TieredCompilation) { + // Tiered policy requires MethodCounters to exist before adding a method to + // the queue. Create if we don't have them yet. + method->get_method_counters(thread); + } + // Outputs from the following MutexLocker block: CompileTask* task = NULL; bool blocking = false; @@ -1144,7 +1253,7 @@ // Make sure the method has not slipped into the queues since // last we checked; note that those checks were "fast bail-outs". // Here we need to be more careful, see 14012000 below. - if (compilation_is_in_queue(method, osr_bci)) { + if (compilation_is_in_queue(method)) { return; } @@ -1165,7 +1274,7 @@ } // Should this thread wait for completion of the compile? - blocking = is_compile_blocking(method, osr_bci); + blocking = is_compile_blocking(); // We will enter the compilation in the queue. // 14012000: Note that this sets the queued_for_compile bits in @@ -1357,19 +1466,17 @@ } -// ------------------------------------------------------------------ -// CompileBroker::compilation_is_in_queue -// -// See if this compilation is already requested. -// -// Implementation note: there is only a single "is in queue" bit -// for each method. This means that the check below is overly -// conservative in the sense that an osr compilation in the queue -// will block a normal compilation from entering the queue (and vice -// versa). This can be remedied by a full queue search to disambiguate -// cases. If it is deemed profitible, this may be done. -bool CompileBroker::compilation_is_in_queue(methodHandle method, - int osr_bci) { +/** + * See if this compilation is already requested. + * + * Implementation note: there is only a single "is in queue" bit + * for each method. This means that the check below is overly + * conservative in the sense that an osr compilation in the queue + * will block a normal compilation from entering the queue (and vice + * versa). This can be remedied by a full queue search to disambiguate + * cases. If it is deemed profitable, this may be done. + */ +bool CompileBroker::compilation_is_in_queue(methodHandle method) { return method->queued_for_compilation(); } @@ -1449,13 +1556,11 @@ #endif } - -// ------------------------------------------------------------------ -// CompileBroker::is_compile_blocking -// -// Should the current thread be blocked until this compilation request -// has been fulfilled? -bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) { +/** + * Should the current thread block until this compilation request + * has been fulfilled? + */ +bool CompileBroker::is_compile_blocking() { assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock"); return !BackgroundCompilation; } @@ -1483,7 +1588,7 @@ int hot_count, const char* comment, bool blocking) { - CompileTask* new_task = allocate_task(); + CompileTask* new_task = CompileTask::allocate(); new_task->initialize(compile_id, method, osr_bci, comp_level, hot_method, hot_count, comment, blocking); @@ -1492,75 +1597,52 @@ } -// ------------------------------------------------------------------ -// CompileBroker::allocate_task -// -// Allocate a CompileTask, from the free list if possible. 
-CompileTask* CompileBroker::allocate_task() { - MutexLocker locker(CompileTaskAlloc_lock); - CompileTask* task = NULL; - if (_task_free_list != NULL) { - task = _task_free_list; - _task_free_list = task->next(); - task->set_next(NULL); - } else { - task = new CompileTask(); - task->set_next(NULL); - } - return task; -} - - -// ------------------------------------------------------------------ -// CompileBroker::free_task -// -// Add a task to the free list. -void CompileBroker::free_task(CompileTask* task) { - MutexLocker locker(CompileTaskAlloc_lock); - task->free(); - task->set_next(_task_free_list); - _task_free_list = task; -} - - -// ------------------------------------------------------------------ -// CompileBroker::wait_for_completion -// -// Wait for the given method CompileTask to complete. +/** + * Wait for the compilation task to complete. + */ void CompileBroker::wait_for_completion(CompileTask* task) { if (CIPrintCompileQueue) { + ttyLocker ttyl; tty->print_cr("BLOCKING FOR COMPILE"); } assert(task->is_blocking(), "can only wait on blocking task"); - JavaThread *thread = JavaThread::current(); + JavaThread* thread = JavaThread::current(); thread->set_blocked_on_compilation(true); methodHandle method(thread, task->method()); { MutexLocker waiter(task->lock(), thread); - while (!task->is_complete()) + while (!task->is_complete() && !is_compilation_disabled_forever()) { task->lock()->wait(); + } } + + thread->set_blocked_on_compilation(false); + if (is_compilation_disabled_forever()) { + CompileTask::free(task); + return; + } + // It is harmless to check this status without the lock, because // completion is a stable property (until the task object is recycled). assert(task->is_complete(), "Compilation should have completed"); assert(task->code_handle() == NULL, "must be reset"); - thread->set_blocked_on_compilation(false); - // By convention, the waiter is responsible for recycling a // blocking CompileTask. Since there is only one waiter ever // waiting on a CompileTask, we know that no one else will // be using this CompileTask; we can free it. - free_task(task); + CompileTask::free(task); } -// Initialize compiler thread(s) + compiler object(s). The postcondition -// of this function is that the compiler runtimes are initialized and that -//compiler threads can start compiling. +/** + * Initialize compiler thread(s) + compiler object(s). The postcondition + * of this function is that the compiler runtimes are initialized and that + * compiler threads can start compiling. + */ bool CompileBroker::init_compiler_runtime() { CompilerThread* thread = CompilerThread::current(); AbstractCompiler* comp = thread->compiler(); @@ -1597,7 +1679,6 @@ disable_compilation_forever(); // If compiler initialization failed, no compiler thread that is specific to a // particular compiler runtime will ever start to compile methods. - shutdown_compiler_runtime(comp, thread); return false; } @@ -1611,9 +1692,11 @@ return true; } -// If C1 and/or C2 initialization failed, we shut down all compilation. -// We do this to keep things simple. This can be changed if it ever turns out to be -// a problem. +/** + * If C1 and/or C2 initialization failed, we shut down all compilation. + * We do this to keep things simple. This can be changed if it ever turns + * out to be a problem. 
+ */ void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { // Free buffer blob, if allocated if (thread->get_buffer_blob() != NULL) { @@ -1625,28 +1708,25 @@ // There are two reasons for shutting down the compiler // 1) compiler runtime initialization failed // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing - warning("Shutting down compiler %s (no space to run compilers)", comp->name()); + warning("%s initialization failed. Shutting down all compilers", comp->name()); // Only one thread per compiler runtime object enters here // Set state to shut down comp->set_shut_down(); - MutexLocker mu(MethodCompileQueue_lock, thread); - CompileQueue* queue; - if (_c1_method_queue != NULL) { - _c1_method_queue->delete_all(); - queue = _c1_method_queue; - _c1_method_queue = NULL; - delete _c1_method_queue; + // Delete all queued compilation tasks to make compiler threads exit faster. + if (_c1_compile_queue != NULL) { + _c1_compile_queue->free_all(); } - if (_c2_method_queue != NULL) { - _c2_method_queue->delete_all(); - queue = _c2_method_queue; - _c2_method_queue = NULL; - delete _c2_method_queue; + if (_c2_compile_queue != NULL) { + _c2_compile_queue->free_all(); } + // Set flags so that we continue execution with using interpreter only. + UseCompiler = false; + UseInterpreter = true; + // We could delete compiler runtimes also. However, there are references to // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then // fail. This can be done later if necessary. @@ -1732,26 +1812,11 @@ if (method()->number_of_breakpoints() == 0) { // Compile the method. if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { -#ifdef COMPILER1 - // Allow repeating compilations for the purpose of benchmarking - // compile speed. This is not useful for customers. - if (CompilationRepeat != 0) { - int compile_count = CompilationRepeat; - while (compile_count > 0) { - invoke_compiler_on_method(task); - nmethod* nm = method->code(); - if (nm != NULL) { - nm->make_zombie(); - method->clear_code(); - } - compile_count--; - } - } -#endif /* COMPILER1 */ invoke_compiler_on_method(task); } else { // After compilation is disabled, remove remaining methods from queue method->clear_queued_for_compilation(); + task->set_failure_reason("compilation is disabled"); } } } @@ -1780,7 +1845,7 @@ os::file_separator(), thread_id, os::current_process_id()); } - fp = fopen(file_name, "at"); + fp = fopen(file_name, "wt"); if (fp != NULL) { if (LogCompilation && Verbose) { tty->print_cr("Opening compilation log %s", file_name); @@ -1939,6 +2004,7 @@ compilable = ci_env.compilable(); if (ci_env.failing()) { + task->set_failure_reason(ci_env.failure_reason()); const char* retry_message = ci_env.retry_message(); if (_compilation_log != NULL) { _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message); @@ -2011,7 +2077,7 @@ // Note that the queued_for_compilation bits are cleared without // protection of a mutex. [They were set by the requester thread, - // when adding the task to the complie queue -- at which time the + // when adding the task to the compile queue -- at which time the // compile queue lock was held. 
Subsequently, we acquired the compile
   // queue lock to get this task off the compile queue; thus (to belabour
   // the point somewhat) our clearing of the bits must be occurring
--- ./hotspot/src/share/vm/compiler/compileBroker.hpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Feb 04 12:14:39 2015 -0800
@@ -40,6 +40,11 @@
   friend class VMStructs;
 
  private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int _num_allocated_tasks;
+#endif
+
   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
@@ -52,13 +57,14 @@
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
   CompileTask* _next, *_prev;
-
+  bool         _is_free;
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
   Method*      _hot_method;   // which method actually triggered this task
   jobject      _hot_method_holder;
   int          _hot_count;    // information about its invocation counter
   const char*  _comment;      // more info about the task
+  const char*  _failure_reason;
 
  public:
   CompileTask() {
@@ -69,7 +75,8 @@
                   methodHandle hot_method, int hot_count, const char* comment,
                   bool is_blocking);
 
-  void free();
+  static CompileTask* allocate();
+  static void         free(CompileTask* task);
 
   int          compile_id() const                { return _compile_id; }
   Method*      method() const                    { return _method; }
@@ -98,6 +105,8 @@
   void         set_next(CompileTask* next)       { _next = next; }
   CompileTask* prev() const                      { return _prev; }
   void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }
 
  private:
   static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
@@ -130,6 +139,10 @@
   void         log_task_queued();
   void         log_task_start(CompileLog* log);
   void         log_task_done(CompileLog* log);
+
+  void         set_failure_reason(const char* reason) {
+    _failure_reason = reason;
+  }
 };
 
 // CompilerCounters
@@ -188,7 +201,11 @@
   CompileTask* _first;
   CompileTask* _last;
 
+  CompileTask* _first_stale;
+
   int _size;
+
+  void purge_stale_tasks();
  public:
   CompileQueue(const char* name, Monitor* lock) {
     _name = name;
@@ -196,6 +213,7 @@
     _first = NULL;
     _last = NULL;
     _size = 0;
+    _first_stale = NULL;
   }
 
   const char*  name() const                      { return _name; }
@@ -203,6 +221,7 @@
 
   void         add(CompileTask* task);
   void         remove(CompileTask* task);
+  void         remove_and_mark_stale(CompileTask* task);
   CompileTask* first()                           { return _first; }
   CompileTask* last()                            { return _last; }
 
@@ -211,10 +230,11 @@
   bool         is_empty() const                  { return _first == NULL; }
   int          size() const                      { return _size; }
 
+
   // Redefine Classes support
   void mark_on_stack();
-  void delete_all();
-  void print();
+  void free_all();
+  NOT_PRODUCT (void print();)
 
   ~CompileQueue() {
     assert (is_empty(), " Compile Queue must be empty");
@@ -267,9 +287,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];
 
-  static CompileQueue* _c2_method_queue;
-  static CompileQueue* _c1_method_queue;
-  static CompileTask* _task_free_list;
+  static CompileQueue* _c2_compile_queue;
+  static CompileQueue* _c1_compile_queue;
 
   static GrowableArray<CompilerThread*>* _compiler_threads;
 
@@ -322,7 +341,7 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static bool is_compile_blocking      (methodHandle method, int osr_bci);
+  static bool is_compile_blocking      ();
   static void preload_classes          (methodHandle method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue* queue,
@@ -334,8 +353,6 @@
                                           int hot_count,
                                           const char* comment,
                                           bool blocking);
-  static CompileTask* allocate_task();
-  static void free_task(CompileTask* task);
   static void wait_for_completion(CompileTask* task);
 
   static void invoke_compiler_on_method(CompileTask* task);
@@ -353,8 +370,8 @@
                                  const char* comment, Thread* thread);
 
   static CompileQueue* compile_queue(int comp_level) {
-    if (is_c2_compile(comp_level)) return _c2_method_queue;
-    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    if (is_c2_compile(comp_level)) return _c2_compile_queue;
+    if (is_c1_compile(comp_level)) return _c1_compile_queue;
     return NULL;
   }
   static bool init_compiler_runtime();
@@ -372,7 +389,7 @@
     return NULL;
   }
 
-  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static bool compilation_is_in_queue(methodHandle method);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
--- ./hotspot/src/share/vm/compiler/compileLog.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/compiler/compileLog.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -55,8 +55,10 @@
 }
 
 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
   FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
@@ -268,10 +270,9 @@
       }
       file->print_raw_cr("</compilation_log>");
       close(partial_fd);
-      unlink(partial_file);
     }
     CompileLog* next_log = log->_next;
-    delete log;
+    delete log; // Removes partial file
     log = next_log;
   }
   _first = NULL;
--- ./hotspot/src/share/vm/compiler/compilerOracle.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/compiler/compilerOracle.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -167,44 +167,134 @@
   }
 }
 
+enum OptionType {
+  IntxType,
+  UintxType,
+  BoolType,
+  CcstrType,
+  UnknownType
+};
 
-class MethodOptionMatcher: public MethodMatcher {
-  const char * option;
- public:
-  MethodOptionMatcher(Symbol* class_name, Mode class_mode,
-                      Symbol* method_name, Mode method_mode,
-                      Symbol* signature, const char * opt, MethodMatcher* next):
-    MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
-    option = opt;
+/* Methods to map real type names to OptionType */
+template<typename T>
+static OptionType get_type_for() {
+  return UnknownType;
+};
+
+template<> OptionType get_type_for<intx>() {
+  return IntxType;
+}
+
+template<> OptionType get_type_for<uintx>() {
+  return UintxType;
+}
+
+template<> OptionType get_type_for<bool>() {
+  return BoolType;
+}
+
+template<> OptionType get_type_for<ccstr>() {
+  return CcstrType;
+}
+
+template<typename T>
+static const T copy_value(const T value) {
+  return value;
+}
+
+template<> const ccstr copy_value<ccstr>(const ccstr value) {
+  return (const ccstr)strdup(value);
+}
+
+template<typename T>
+class TypedMethodOptionMatcher : public MethodMatcher {
+  const char* _option;
+  OptionType _type;
+  const T _value;
+
+public:
+  TypedMethodOptionMatcher(Symbol* class_name, Mode class_mode,
+                           Symbol* method_name, Mode method_mode,
+                           Symbol* signature, const char* opt,
+                           const T value,  MethodMatcher* next) :
+    MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next),
+                  _type(get_type_for<T>()), _value(copy_value<T>(value)) {
+    _option = strdup(opt);
   }
-  bool match(methodHandle method, const char* opt) {
-    MethodOptionMatcher* current = this;
+
+  ~TypedMethodOptionMatcher() {
+    free((void*)_option);
+  }
+
+  TypedMethodOptionMatcher<T>* match(methodHandle method, const char* opt) {
+    TypedMethodOptionMatcher<T>* current = this;
     while (current != NULL) {
-      current = (MethodOptionMatcher*)current->find(method);
+      current = (TypedMethodOptionMatcher<T>*)current->find(method);
       if (current == NULL) {
-        return false;
+        return NULL;
       }
-      if (strcmp(current->option, opt) == 0) {
-        return true;
+      if (strcmp(current->_option, opt) == 0) {
+        return current;
       }
       current = current->next();
     }
-    return false;
+    return NULL;
   }
 
-  MethodOptionMatcher* next() {
-    return (MethodOptionMatcher*)_next;
+  TypedMethodOptionMatcher<T>* next() {
+    return (TypedMethodOptionMatcher<T>*)_next;
   }
 
-  virtual void print() {
+  OptionType get_type(void) {
+      return _type;
+  };
+
+  T value() { return _value; }
+
+  void print() {
+    ttyLocker ttyl;
     print_base();
-    tty->print(" %s", option);
+    tty->print(" %s", _option);
+    tty->print(" <unknown option type>");
     tty->cr();
   }
 };
 
+template<>
+void TypedMethodOptionMatcher<intx>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" intx %s", _option);
+  tty->print(" = " INTX_FORMAT, _value);
+  tty->cr();
+};
+
+template<>
+void TypedMethodOptionMatcher<uintx>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" uintx %s", _option);
+  tty->print(" = " UINTX_FORMAT, _value);
+  tty->cr();
+};
+
+template<>
+void TypedMethodOptionMatcher<bool>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" bool %s", _option);
+  tty->print(" = %s", _value ? "true" : "false");
+  tty->cr();
+};
+
+template<>
+void TypedMethodOptionMatcher<ccstr>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" const char* %s", _option);
+  tty->print(" = '%s'", _value);
+  tty->cr();
+};
 
 // this must parallel the command_names below
 enum OracleCommand {
@@ -259,23 +349,46 @@
   return lists[command];
 }
 
-
-
+template<typename T>
 static MethodMatcher* add_option_string(Symbol* class_name, MethodMatcher::Mode c_mode,
                                         Symbol* method_name, MethodMatcher::Mode m_mode,
                                         Symbol* signature,
-                                        const char* option) {
-  lists[OptionCommand] = new MethodOptionMatcher(class_name, c_mode, method_name, m_mode,
-                                                 signature, option, lists[OptionCommand]);
+                                        const char* option,
+                                        T value) {
+  lists[OptionCommand] = new TypedMethodOptionMatcher<T>(class_name, c_mode, method_name, m_mode,
+                                                         signature, option, value, lists[OptionCommand]);
   return lists[OptionCommand];
 }
 
+template<typename T>
+static bool get_option_value(methodHandle method, const char* option, T& value) {
+  TypedMethodOptionMatcher<T>* m;
+  if (lists[OptionCommand] != NULL
+      && (m = ((TypedMethodOptionMatcher<T>*)lists[OptionCommand])->match(method, option)) != NULL
+      && m->get_type() == get_type_for<T>()) {
+    value = m->value();
+    return true;
+  } else {
+    return false;
+  }
+}
 
 bool CompilerOracle::has_option_string(methodHandle method, const char* option) {
-  return lists[OptionCommand] != NULL &&
-    ((MethodOptionMatcher*)lists[OptionCommand])->match(method, option);
+  bool value = false;
+  get_option_value(method, option, value);
+  return value;
 }
 
+template<typename T>
+bool CompilerOracle::has_option_value(methodHandle method, const char* option, T& value) {
+  return ::get_option_value(method, option, value);
}
+
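For illustration, a compiler-side caller could query one of the typed options like this. The flag name DumpPath and its value are invented; has_option_value and the CompileCommand option syntax are the ones introduced by this patch (the accepted command-line forms are spelled out in parse_from_line below).

// Hypothetical query of a typed method option (flag name is made up):
ccstr dump_path = NULL;
if (CompilerOracle::has_option_value(method, "DumpPath", dump_path)) {
  // dump_path was set per-method on the command line, e.g. with
  //   -XX:CompileCommand=option,SomeKlass::someMethod,ccstr,DumpPath,mydir
}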
+// Explicit instantiation for all OptionTypes supported.
+template bool CompilerOracle::has_option_value<intx>(methodHandle method, const char* option, intx& value);
+template bool CompilerOracle::has_option_value<uintx>(methodHandle method, const char* option, uintx& value);
+template bool CompilerOracle::has_option_value<bool>(methodHandle method, const char* option, bool& value);
+template bool CompilerOracle::has_option_value<ccstr>(methodHandle method, const char* option, ccstr& value);
 
 bool CompilerOracle::should_exclude(methodHandle method, bool& quietly) {
   quietly = true;
@@ -433,6 +546,94 @@
 
 
 
+// Scan next flag and value in line, return MethodMatcher object on success, NULL on failure.
+// On failure, error_msg contains a description of the first error.
+// For future extensions: set error_msg on first error.
+static MethodMatcher* scan_flag_and_value(const char* type, const char* line, int& total_bytes_read,
+                                          Symbol* c_name, MethodMatcher::Mode c_match,
+                                          Symbol* m_name, MethodMatcher::Mode m_match,
+                                          Symbol* signature,
+                                          char* errorbuf, const int buf_size) {
+  total_bytes_read = 0;
+  int bytes_read = 0;
+  char flag[256];
+
+  // Read flag name.
+  if (sscanf(line, "%*[ \t]%255[a-zA-Z0-9]%n", flag, &bytes_read) == 1) {
+    line += bytes_read;
+    total_bytes_read += bytes_read;
+
+    // Read value.
+    if (strcmp(type, "intx") == 0) {
+      intx value;
+      if (sscanf(line, "%*[ \t]" INTX_FORMAT "%n", &value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s ", flag, type);
+      }
+    } else if (strcmp(type, "uintx") == 0) {
+      uintx value;
+      if (sscanf(line, "%*[ \t]" UINTX_FORMAT "%n", &value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "ccstr") == 0) {
+      ResourceMark rm;
+      char* value = NEW_RESOURCE_ARRAY(char, strlen(line) + 1);
+      if (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "ccstrlist") == 0) {
+      // Accumulates several strings into one. The internal type is ccstr.
+      ResourceMark rm;
+      char* value = NEW_RESOURCE_ARRAY(char, strlen(line) + 1);
+      char* next_value = value;
+      if (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", next_value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        line += bytes_read;
+        next_value += bytes_read;
+        char* end_value = next_value-1;
+        while (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", next_value, &bytes_read) == 1) {
+          total_bytes_read += bytes_read;
+          line += bytes_read;
+          *end_value = ' '; // override '\0'
+          next_value += bytes_read;
+          end_value = next_value-1;
+        }
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "bool") == 0) {
+      char value[256];
+      if (sscanf(line, "%*[ \t]%255[a-zA-Z]%n", value, &bytes_read) == 1) {
+        if (strcmp(value, "true") == 0) {
+          total_bytes_read += bytes_read;
+          return add_option_string(c_name, c_match, m_name, m_match, signature, flag, true);
+        } else if (strcmp(value, "false") == 0) {
+          total_bytes_read += bytes_read;
+          return add_option_string(c_name, c_match, m_name, m_match, signature, flag, false);
+        } else {
+          jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+        }
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else {
+      jio_snprintf(errorbuf, buf_size, "  Type %s not supported ", type);
+    }
+  } else {
+    jio_snprintf(errorbuf, buf_size, "  Flag name for type %s should be alphanumeric ", type);
+  }
+  return NULL;
+}
+
 void CompilerOracle::parse_from_line(char* line) {
   if (line[0] == '\0') return;
   if (line[0] == '#')  return;
@@ -462,8 +663,10 @@
   int bytes_read;
   OracleCommand command = parse_command_name(line, &bytes_read);
   line += bytes_read;
+  ResourceMark rm;
 
   if (command == UnknownCommand) {
+    ttyLocker ttyl;
     tty->print_cr("CompilerOracle: unrecognized line");
     tty->print_cr("  \"%s\"", original_line);
     return;
@@ -485,7 +688,7 @@
   char method_name[256];
   char sig[1024];
   char errorbuf[1024];
-  const char* error_msg = NULL;
+  const char* error_msg = NULL; // description of first error that appears
   MethodMatcher* match = NULL;
 
   if (scan_line(line, class_name, &c_match, method_name, &m_match, &bytes_read, error_msg)) {
@@ -504,43 +707,77 @@
     }
 
     if (command == OptionCommand) {
-      // Look for trailing options to support
-      // ciMethod::has_option("string") to control features in the
-      // compiler. Multiple options may follow the method name.
-      char option[256];
+      // Look for trailing options.
+      //
+      // Two types of trailing options are supported:
+      //
+      // (1) CompileCommand=option,Klass::method,flag
+      // (2) CompileCommand=option,Klass::method,type,flag,value
+      //
+      // Type (1) is used to support ciMethod::has_option("someflag")
+      // (i.e., to check if a flag "someflag" is enabled for a method).
+      //
+      // Type (2) is used to support options with a value. Values can have
+      // the following types: intx, uintx, bool, ccstr, and ccstrlist.
+ // + // For future extensions: extend scan_flag_and_value() + char option[256]; // stores flag for Type (1) and type of Type (2) while (sscanf(line, "%*[ \t]%255[a-zA-Z0-9]%n", option, &bytes_read) == 1) { if (match != NULL && !_quiet) { // Print out the last match added + ttyLocker ttyl; tty->print("CompilerOracle: %s ", command_names[command]); match->print(); } - match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option)); line += bytes_read; - } + + if (strcmp(option, "intx") == 0 + || strcmp(option, "uintx") == 0 + || strcmp(option, "bool") == 0 + || strcmp(option, "ccstr") == 0 + || strcmp(option, "ccstrlist") == 0 + ) { + + // Type (2) option: parse flag name and value. + match = scan_flag_and_value(option, line, bytes_read, + c_name, c_match, m_name, m_match, signature, + errorbuf, sizeof(errorbuf)); + if (match == NULL) { + error_msg = errorbuf; + break; + } + line += bytes_read; + } else { + // Type (1) option + match = add_option_string(c_name, c_match, m_name, m_match, signature, option, true); + } + } // while( } else { - bytes_read = 0; - sscanf(line, "%*[ \t]%n", &bytes_read); - if (line[bytes_read] != '\0') { - jio_snprintf(errorbuf, sizeof(errorbuf), " Unrecognized text after command: %s", line); - error_msg = errorbuf; - } else { - match = add_predicate(command, c_name, c_match, m_name, m_match, signature); - } + match = add_predicate(command, c_name, c_match, m_name, m_match, signature); } } - if (match != NULL) { - if (!_quiet) { - ResourceMark rm; - tty->print("CompilerOracle: %s ", command_names[command]); - match->print(); - } - } else { + ttyLocker ttyl; + if (error_msg != NULL) { + // an error has happened tty->print_cr("CompilerOracle: unrecognized line"); tty->print_cr(" \"%s\"", original_line); if (error_msg != NULL) { tty->print_cr("%s", error_msg); } + } else { + // check for remaining characters + bytes_read = 0; + sscanf(line, "%*[ \t]%n", &bytes_read); + if (line[bytes_read] != '\0') { + tty->print_cr("CompilerOracle: unrecognized line"); + tty->print_cr(" \"%s\"", original_line); + tty->print_cr(" Unrecognized text %s after command ", line); + } else if (match != NULL && !_quiet) { + tty->print("CompilerOracle: %s ", command_names[command]); + match->print(); + } } } --- ./hotspot/src/share/vm/compiler/compilerOracle.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/compiler/compilerOracle.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -64,6 +64,11 @@ // Check to see if this method has option set for it static bool has_option_string(methodHandle method, const char * option); + // Check if method has option and value set. If yes, overwrite value and return true, + // otherwise leave value unchanged and return false. + template + static bool has_option_value(methodHandle method, const char* option, T& value); + // Reads from string instead of file static void parse_from_string(const char* command_string, void (*parser)(char*)); --- ./hotspot/src/share/vm/compiler/disassembler.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/compiler/disassembler.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -245,12 +245,12 @@ }; decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) { - memset(this, 0, sizeof(*this)); + memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields. _output = output ? 
output : tty; _code = code; if (code != NULL && code->is_nmethod()) _nm = (nmethod*) code; - _strings.assign(c); + _strings.copy(c); // by default, output pc but not bytes: _print_pc = true; --- ./hotspot/src/share/vm/compiler/methodLiveness.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/compiler/methodLiveness.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -475,7 +475,7 @@ bci = 0; } - MethodLivenessResult answer((uintptr_t*)NULL,0); + MethodLivenessResult answer((BitMap::bm_word_t*)NULL,0); if (_block_count > 0) { if (TimeLivenessAnalysis) _time_total.start(); @@ -1000,7 +1000,7 @@ } MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* method, int bci) { - MethodLivenessResult answer(NEW_RESOURCE_ARRAY(uintptr_t, _analyzer->bit_map_size_words()), + MethodLivenessResult answer(NEW_RESOURCE_ARRAY(BitMap::bm_word_t, _analyzer->bit_map_size_words()), _analyzer->bit_map_size_bits()); answer.set_is_valid(); --- ./hotspot/src/share/vm/compiler/oopMap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/compiler/oopMap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -467,7 +467,6 @@ assert(cb != NULL, "no codeblob"); // Any reg might be saved by a safepoint handler (see generate_handler_blob). - const int max_saved_on_entry_reg_count = ConcreteRegisterImpl::number_of_registers; assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id), "already updated this map; do not 'update' it twice!" ); debug_only(reg_map->_update_for_id = fr->id()); @@ -477,27 +476,20 @@ !cb->caller_must_gc_arguments(reg_map->thread())), "include_argument_oops should already be set"); - int nof_callee = 0; - oop* locs[2*max_saved_on_entry_reg_count+1]; - VMReg regs[2*max_saved_on_entry_reg_count+1]; - // ("+1" because max_saved_on_entry_reg_count might be zero) - // Scan through oopmap and find location of all callee-saved registers // (we do not do update in place, since info could be overwritten) address pc = fr->pc(); + OopMap* map = cb->oop_map_for_return_address(pc); + assert(map != NULL, "no ptr map found"); + DEBUG_ONLY(int nof_callee = 0;) - OopMap* map = cb->oop_map_for_return_address(pc); - - assert(map != NULL, " no ptr map found"); - - OopMapValue omv; - for(OopMapStream oms(map,OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) { - omv = oms.current(); - assert(nof_callee < 2*max_saved_on_entry_reg_count, "overflow"); - regs[nof_callee] = omv.content_reg(); - locs[nof_callee] = fr->oopmapreg_to_location(omv.reg(),reg_map); - nof_callee++; + for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) { + OopMapValue omv = oms.current(); + VMReg reg = omv.content_reg(); + oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map); + reg_map->set_location(reg, (address) loc); + DEBUG_ONLY(nof_callee++;) } // Check that runtime stubs save all callee-saved registers @@ -506,11 +498,6 @@ (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT), "must save all"); #endif // COMPILER2 - - // Copy found callee-saved register to reg_map - for(int i = 0; i < nof_callee; i++) { - reg_map->set_location(regs[i], (address)locs[i]); - } } //============================================================================= --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -158,7 +158,7 @@ " coal_deaths(" SIZE_FORMAT ")" " + count(" 
SSIZE_FORMAT ")", p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(), - _allocation_stats.split_births(), _allocation_stats.split_deaths(), + _allocation_stats.coal_births(), _allocation_stats.split_deaths(), _allocation_stats.coal_deaths(), count())); } #endif --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,8 @@ } void ConcurrentMarkSweepPolicy::initialize_generations() { - _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); + _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, + CURRENT_PC, AllocFailStrategy::RETURN_NULL); if (_generations == NULL) vm_exit_during_initialization("Unable to allocate gen spec"); --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP #include "memory/genOopClosures.hpp" +#include "memory/iterator.hpp" ///////////////////////////////////////////////////////////////// // Closures used by ConcurrentMarkSweepGeneration's collector @@ -48,33 +49,13 @@ } \ } -// Applies the given oop closure to all oops in all klasses visited. -class CMKlassClosure : public KlassClosure { - friend class CMSOopClosure; - friend class CMSOopsInGenClosure; - - OopClosure* _oop_closure; - - // Used when _oop_closure couldn't be set in an initialization list. - void initialize(OopClosure* oop_closure) { - assert(_oop_closure == NULL, "Should only be called once"); - _oop_closure = oop_closure; - } +// TODO: This duplication of the MetadataAwareOopClosure class is only needed +// because some CMS OopClosures derive from OopsInGenClosure. It would be +// good to get rid of them completely. +class MetadataAwareOopsInGenClosure: public OopsInGenClosure { + KlassToOopClosure _klass_closure; public: - CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { } - - void do_klass(Klass* k); -}; - -// The base class for all CMS marking closures. -// It's used to proxy through the metadata to the oops defined in them. -class CMSOopClosure: public ExtendedOopClosure { - CMKlassClosure _klass_closure; - public: - CMSOopClosure() : ExtendedOopClosure() { - _klass_closure.initialize(this); - } - CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { + MetadataAwareOopsInGenClosure() { _klass_closure.initialize(this); } @@ -87,26 +68,7 @@ virtual void do_class_loader_data(ClassLoaderData* cld); }; -// TODO: This duplication of the CMSOopClosure class is only needed because -// some CMS OopClosures derive from OopsInGenClosure. It would be good -// to get rid of them completely. 
-class CMSOopsInGenClosure: public OopsInGenClosure {
- CMKlassClosure _klass_closure;
- public:
- CMSOopsInGenClosure() {
- _klass_closure.initialize(this);
- }
-
- virtual bool do_metadata() { return do_metadata_nv(); }
- inline bool do_metadata_nv() { return true; }
-
- virtual void do_klass(Klass* k);
- void do_klass_nv(Klass* k);
-
- virtual void do_class_loader_data(ClassLoaderData* cld);
-};
-
-class MarkRefsIntoClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
 const MemRegion _span;
 CMSBitMap* _bitMap;
@@ -122,7 +84,7 @@
 }
 };
-class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
+class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
 const MemRegion _span;
 CMSBitMap* _bitMap;
@@ -140,7 +102,7 @@
 // A variant of the above used in certain kinds of CMS
 // marking verification.
-class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
 const MemRegion _span;
 CMSBitMap* _verification_bm;
@@ -159,7 +121,7 @@
 };
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public CMSOopClosure {
+class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
 CMSCollector* _collector;
 MemRegion _span;
@@ -193,7 +155,7 @@
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public CMSOopClosure {
+class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
 CMSCollector* _collector;
 MemRegion _span;
@@ -218,7 +180,7 @@
 };
 // The non-parallel version (the parallel version appears further below).
-class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
 MemRegion _span;
 CMSBitMap* _bit_map;
@@ -262,7 +224,7 @@
 // stack and the bitMap are shared, so access needs to be suitably
 // synchronized. An OopTaskQueue structure, supporting efficient
 // work stealing, replaces a CMSMarkStack for storing grey objects.
-class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
+class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
 MemRegion _span;
 CMSBitMap* _bit_map;
@@ -291,7 +253,7 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public CMSOopClosure {
+class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
 CMSCollector* _collector;
 MemRegion _span;
@@ -324,7 +286,7 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public CMSOopClosure {
+class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
 CMSCollector* _collector;
 MemRegion _whole_span;
@@ -364,7 +326,7 @@
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public CMSOopClosure { +class CMSKeepAliveClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; const MemRegion _span; @@ -384,7 +346,7 @@ inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } }; -class CMSInnerParMarkAndPushClosure: public CMSOopClosure { +class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _span; @@ -405,7 +367,7 @@ // A parallel (MT) version of the above, used when // reference processing is parallel; the only difference // is in the do_oop method. -class CMSParKeepAliveClosure: public CMSOopClosure { +class CMSParKeepAliveClosure: public MetadataAwareOopClosure { private: MemRegion _span; OopTaskQueue* _work_queue; --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -44,33 +44,20 @@ } } -// CMSOopClosure and CMSoopsInGenClosure are duplicated, +// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated, // until we get rid of OopsInGenClosure. -inline void CMSOopClosure::do_klass(Klass* k) { do_klass_nv(k); } -inline void CMSOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } - -inline void CMSOopClosure::do_klass_nv(Klass* k) { +inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) { ClassLoaderData* cld = k->class_loader_data(); do_class_loader_data(cld); } -inline void CMSOopsInGenClosure::do_klass_nv(Klass* k) { - ClassLoaderData* cld = k->class_loader_data(); - do_class_loader_data(cld); -} +inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } -inline void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) { - assert(_klass_closure._oop_closure == this, "Must be"); - - bool claim = true; // Must claim the class loader data before processing. - cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); -} -inline void CMSOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { +inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { assert(_klass_closure._oop_closure == this, "Must be"); bool claim = true; // Must claim the class loader data before processing. cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); } - #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,12 +33,14 @@ #include "memory/allocation.inline.hpp" #include "memory/blockOffsetTable.inline.hpp" #include "memory/resourceArea.hpp" +#include "memory/space.inline.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" #include "utilities/copy.hpp" @@ -793,53 +795,6 @@ } } -// Apply the given closure to each oop in the space \intersect memory region. 
-void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
- assert_lock_strong(freelistLock());
- if (is_empty()) {
- return;
- }
- MemRegion cur = MemRegion(bottom(), end());
- mr = mr.intersection(cur);
- if (mr.is_empty()) {
- return;
- }
- if (mr.equals(cur)) {
- oop_iterate(cl);
- return;
- }
- assert(mr.end() <= end(), "just took an intersection above");
- HeapWord* obj_addr = block_start(mr.start());
- HeapWord* t = mr.end();
-
- SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
- if (block_is_obj(obj_addr)) {
- // Handle first object specially.
- oop obj = oop(obj_addr);
- obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
- } else {
- FreeChunk* fc = (FreeChunk*)obj_addr;
- obj_addr += fc->size();
- }
- while (obj_addr < t) {
- HeapWord* obj = obj_addr;
- obj_addr += block_size(obj_addr);
- // If "obj_addr" is not greater than top, then the
- // entire object "obj" is within the region.
- if (obj_addr <= t) {
- if (block_is_obj(obj)) {
- oop(obj)->oop_iterate(cl);
- }
- } else {
- // "obj" extends beyond end of region
- if (block_is_obj(obj)) {
- oop(obj)->oop_iterate(&smr_blk);
- }
- break;
- }
- }
-}
-
 // NOTE: In the following methods, in order to safely be able to
 // apply the closure to an object, we need to be sure that the
 // object has been initialized. We are guaranteed that an object
@@ -898,42 +853,60 @@
 UpwardsObjectClosure* cl) {
 assert_locked(freelistLock());
 NOT_PRODUCT(verify_objects_initialized());
- Space::object_iterate_mem(mr, cl);
+ assert(!mr.is_empty(), "Should be non-empty");
+ // We use MemRegion(bottom(), end()) rather than used_region() below
+ // because the two are not necessarily equal for some kinds of
+ // spaces, in particular, certain kinds of free list spaces.
+ // We could use the more complicated but more precise:
+ // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+ // but the slight imprecision seems acceptable in the assertion check.
+ assert(MemRegion(bottom(), end()).contains(mr),
+ "Should be within used space");
+ HeapWord* prev = cl->previous(); // max address from last time
+ if (prev >= mr.end()) { // nothing to do
+ return;
+ }
+ // This assert will not work when we go from cms space to perm
+ // space, and use same closure. Easy fix deferred for later. XXX YSR
+ // assert(prev == NULL || contains(prev), "Should be within space");
+
+ bool last_was_obj_array = false;
+ HeapWord *blk_start_addr, *region_start_addr;
+ if (prev > mr.start()) {
+ region_start_addr = prev;
+ blk_start_addr = prev;
+ // The previous invocation may have pushed "prev" beyond the
+ // last allocated block yet there may still be blocks
+ // in this region due to a particular coalescing policy.
+ // Relax the assertion so that the case where the unallocated
+ // block is maintained and "prev" is beyond the unallocated
+ // block does not cause the assertion to fire.
+ assert((BlockOffsetArrayUseUnallocatedBlock && + (!is_in(prev))) || + (blk_start_addr == block_start(region_start_addr)), "invariant"); + } else { + region_start_addr = mr.start(); + blk_start_addr = block_start(region_start_addr); + } + HeapWord* region_end_addr = mr.end(); + MemRegion derived_mr(region_start_addr, region_end_addr); + while (blk_start_addr < region_end_addr) { + const size_t size = block_size(blk_start_addr); + if (block_is_obj(blk_start_addr)) { + last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); + } else { + last_was_obj_array = false; + } + blk_start_addr += size; + } + if (!last_was_obj_array) { + assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), + "Should be within (closed) used space"); + assert(blk_start_addr > prev, "Invariant"); + cl->set_previous(blk_start_addr); // min address for next time + } } -// Callers of this iterator beware: The closure application should -// be robust in the face of uninitialized objects and should (always) -// return a correct size so that the next addr + size below gives us a -// valid block boundary. [See for instance, -// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() -// in ConcurrentMarkSweepGeneration.cpp.] -HeapWord* -CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { - assert_lock_strong(freelistLock()); - HeapWord *addr, *last; - size_t size; - for (addr = bottom(), last = end(); - addr < last; addr += size) { - FreeChunk* fc = (FreeChunk*)addr; - if (fc->is_free()) { - // Since we hold the free list lock, which protects direct - // allocation in this generation by mutators, a free object - // will remain free throughout this iteration code. - size = fc->size(); - } else { - // Note that the object need not necessarily be initialized, - // because (for instance) the free list lock does NOT protect - // object initialization. The closure application below must - // therefore be correct in the face of uninitialized objects. - size = cl->do_object_careful(oop(addr)); - if (size == 0) { - // An unparsable object found. Signal early termination. - return addr; - } - } - } - return NULL; -} // Callers of this iterator beware: The closure application should // be robust in the face of uninitialized objects and should (always) @@ -2668,7 +2641,7 @@ // Get the #blocks we want to claim size_t n_blks = (size_t)_blocks_to_claim[word_sz].average(); assert(n_blks > 0, "Error"); - assert(ResizePLAB || n_blks == OldPLABSize, "Error"); + assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error"); // In some cases, when the application has a phase change, // there may be a sudden and sharp shift in the object survival // profile, and updating the counts at the end of a scavenge --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -351,10 +351,6 @@ unallocated_block() : end()); } - bool is_in(const void* p) const { - return used_region().contains(p); - } - virtual bool is_free_block(const HeapWord* p) const; // Resizing support @@ -364,7 +360,6 @@ Mutex* freelistLock() const { return &_freelistLock; } // Iteration support - void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); void oop_iterate(ExtendedOopClosure* cl); void object_iterate(ObjectClosure* blk); @@ -377,6 +372,12 @@ // obj_is_alive() to determine whether it is safe to iterate of // an object. 
void safe_object_iterate(ObjectClosure* blk);
+
+ // Iterate over all objects that intersect with mr, calling "cl->do_object"
+ // on each. There is an exception to this: if this closure has already
+ // been invoked on an object, it may skip such objects in some cases. This is
+ // most likely to happen in an "upwards" (ascending address) iteration of
+ // MemRegions.
 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 // Requires that "mr" be entirely within the space.
@@ -385,11 +386,8 @@
 // terminate the iteration and return the address of the start of the
 // subregion that isn't done. Return of "NULL" indicates that the
 // iteration completed.
- virtual HeapWord*
- object_iterate_careful_m(MemRegion mr,
- ObjectClosureCareful* cl);
- virtual HeapWord*
- object_iterate_careful(ObjectClosureCareful* cl);
+ HeapWord* object_iterate_careful_m(MemRegion mr,
+ ObjectClosureCareful* cl);
 // Override: provides a DCTO_CL specific to this kind of space.
 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
--- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -49,7 +49,7 @@
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
-#include "memory/iterator.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -59,6 +59,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
@@ -1519,6 +1520,8 @@
 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+ gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
+ gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
 gclog_or_tty->print_cr("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
 }
@@ -1575,11 +1578,33 @@
 }
 if (MetaspaceGC::should_concurrent_collect()) {
+ if (Verbose && PrintGCDetails) {
+ gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
+ }
+ return true;
+ }
+
+ // CMSTriggerInterval starts a CMS cycle if enough time has passed.
+ if (CMSTriggerInterval >= 0) { + if (CMSTriggerInterval == 0) { + // Trigger always + return true; + } + + // Check the CMS time since begin (we do not check the stats validity + // as we want to be able to trigger the first CMS cycle as well) + if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) { if (Verbose && PrintGCDetails) { - gclog_or_tty->print("CMSCollector: collect for metadata allocation "); + if (stats().valid()) { + gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", + stats().cms_time_since_begin()); + } else { + gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)"); + } } return true; } + } return false; } @@ -2005,7 +2030,7 @@ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); - GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL); + GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id()); if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " "collections passed to foreground collector", _full_gcs_since_conc_gc); @@ -2515,8 +2540,10 @@ assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); + // The gc id is created in register_foreground_gc_start if this collection is synchronous + const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id(); NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, - true, NULL);) + true, NULL, gc_id);) if (UseAdaptiveSizePolicy) { size_policy()->ms_collection_begin(); } @@ -3031,22 +3058,21 @@ HandleMark hm; GenCollectedHeap* gch = GenCollectedHeap::heap(); - // Get a clear set of claim bits for the strong roots processing to work with. + // Get a clear set of claim bits for the roots processing to work with. ClassLoaderDataGraph::clear_claimed_marks(); // Mark from roots one level into CMS MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - gch->gen_process_strong_roots(_cmsGen->level(), - true, // younger gens are roots - true, // activate StrongRootsScope - false, // not scavenging - SharedHeap::ScanningOption(roots_scanning_options()), - ¬Older, - true, // walk code active on stacks - NULL, - NULL); // SSS: Provide correct closure + gch->gen_process_roots(_cmsGen->level(), + true, // younger gens are roots + true, // activate StrongRootsScope + SharedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + ¬Older, + NULL, + NULL); // SSS: Provide correct closure // Now mark from the roots MarkFromRootsClosure markFromRootsClosure(this, _span, @@ -3097,24 +3123,24 @@ HandleMark hm; GenCollectedHeap* gch = GenCollectedHeap::heap(); - // Get a clear set of claim bits for the strong roots processing to work with. + // Get a clear set of claim bits for the roots processing to work with. ClassLoaderDataGraph::clear_claimed_marks(); // Mark from roots one level into CMS MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), markBitMap()); - CMKlassClosure klass_closure(¬Older); + CLDToOopClosure cld_closure(¬Older, true); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
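The CMSTriggerInterval hunk above reduces to a small, purely time-based predicate: a negative value disables the feature, zero forces a cycle at every check, and a positive value (in milliseconds) fires once stats().cms_time_since_begin() reaches the interval. A hedged sketch of just that arithmetic (MILLIUNITS is HotSpot's milliseconds-per-second constant):

    // Sketch of the trigger predicate; flag semantics taken from the hunk above.
    static const int MILLIUNITS = 1000;   // ms per second

    bool interval_triggered(long trigger_interval_ms, double secs_since_begin) {
      if (trigger_interval_ms < 0)  return false;  // feature disabled
      if (trigger_interval_ms == 0) return true;   // trigger on every check
      // Positive interval: compare elapsed seconds against ms converted to seconds.
      return secs_since_begin >= trigger_interval_ms / (double) MILLIUNITS;
    }

Note that the stats' validity is deliberately not checked, so the very first cycle can also be started this way, e.g. interval_triggered(30000, t) fires once 30 seconds have elapsed since the last cycle began.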
- gch->gen_process_strong_roots(_cmsGen->level(), - true, // younger gens are roots - true, // activate StrongRootsScope - false, // not scavenging - SharedHeap::ScanningOption(roots_scanning_options()), - ¬Older, - true, // walk code active on stacks - NULL, - &klass_closure); + + gch->gen_process_roots(_cmsGen->level(), + true, // younger gens are roots + true, // activate StrongRootsScope + SharedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + ¬Older, + NULL, + &cld_closure); // Now mark from the roots MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, @@ -3175,16 +3201,6 @@ } void -ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { - if (freelistLock()->owned_by_self()) { - Generation::oop_iterate(mr, cl); - } else { - MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); - Generation::oop_iterate(mr, cl); - } -} - -void ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) { if (freelistLock()->owned_by_self()) { Generation::oop_iterate(cl); @@ -3311,12 +3327,10 @@ void CMSCollector::setup_cms_unloading_and_verification_state() { const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit; - const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + const int rso = SharedHeap::SO_AllCodeCache; // We set the proper root for this CMS cycle here. if (should_unload_classes()) { // Should unload classes this cycle - remove_root_scanning_option(SharedHeap::SO_AllClasses); - add_root_scanning_option(SharedHeap::SO_SystemClasses); remove_root_scanning_option(rso); // Shrink the root set appropriately set_verifying(should_verify); // Set verification state for this cycle return; // Nothing else needs to be done at this time @@ -3324,8 +3338,6 @@ // Not unloading classes this cycle assert(!should_unload_classes(), "Inconsitency!"); - remove_root_scanning_option(SharedHeap::SO_SystemClasses); - add_root_scanning_option(SharedHeap::SO_AllClasses); if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { // Include symbols, strings and code cache elements to prevent their resurrection. 
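The setup_cms_unloading_and_verification_state() hunk above treats the root scanning options as a bit set: the SharedHeap::SO_* constants are single bits, added with a bitwise OR and removed by masking them out, and SO_AllCodeCache now subsumes the old SO_Strings | SO_CodeCache pair. A minimal sketch of that mechanism (flag values invented for illustration; the real ones live in sharedHeap.hpp):

    // Bit-set handling in the style of add/remove_root_scanning_option.
    enum SketchScanningOption {   // invented values, illustration only
      SO_None         = 0x0,
      SO_AllCodeCache = 0x1,
      SO_Strings      = 0x2
    };

    class RootScanOptions {
      int _options;
    public:
      RootScanOptions() : _options(SO_None) {}
      void add(int o)    { _options |=  o; }   // widen the root set
      void remove(int o) { _options &= ~o; }   // shrink the root set
      bool has(int o) const { return (_options & o) != 0; }
    };

When classes are being unloaded, removing the code-cache bit shrinks the root set, exactly as the remove_root_scanning_option(rso) call above does.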
@@ -3533,6 +3545,7 @@ public: CMSPhaseAccounting(CMSCollector *collector, const char *phase, + const GCId gc_id, bool print_cr = true); ~CMSPhaseAccounting(); @@ -3541,6 +3554,7 @@ const char *_phase; elapsedTimer _wallclock; bool _print_cr; + const GCId _gc_id; public: // Not MT-safe; so do not pass around these StackObj's @@ -3556,15 +3570,15 @@ CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, const char *phase, + const GCId gc_id, bool print_cr) : - _collector(collector), _phase(phase), _print_cr(print_cr) { + _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) { if (PrintCMSStatistics != 0) { _collector->resetYields(); } if (PrintGCDetails) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_id); gclog_or_tty->print_cr("[%s-concurrent-%s-start]", _collector->cmsGen()->short_name(), _phase); } @@ -3578,8 +3592,7 @@ _collector->stopTimer(); _wallclock.stop(); if (PrintGCDetails) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_id); gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", _collector->cmsGen()->short_name(), _phase, _collector->timerValue(), _wallclock.seconds()); @@ -3677,7 +3690,7 @@ setup_cms_unloading_and_verification_state(); NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) if (UseAdaptiveSizePolicy) { size_policy()->checkpoint_roots_initial_begin(); } @@ -3690,12 +3703,6 @@ ResourceMark rm; HandleMark hm; - FalseClosure falseClosure; - // In the case of a synchronous collection, we will elide the - // remark step, so it's important to catch all the nmethod oops - // in this step. - // The final 'true' flag to gen_process_strong_roots will ensure this. - // If 'async' is true, we can relax the nmethod tracing. MarkRefsIntoClosure notOlder(_span, &_markBitMap); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -3741,17 +3748,16 @@ gch->set_par_threads(0); } else { // The serial version. - CMKlassClosure klass_closure(¬Older); + CLDToOopClosure cld_closure(¬Older, true); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
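CMSPhaseAccounting, extended in the hunk above, is a stack-allocated guard: its constructor stamps the log and prints the concurrent-phase start line, its destructor stops the timers and prints the elapsed CPU and wall times, and the new GCId member lets both lines carry the id of the cycle they belong to. A simplified RAII sketch of that shape (standard-library stand-ins, not the HotSpot types):

    #include <cstdio>
    #include <ctime>

    class PhaseLogger {
      const char*  _phase;
      unsigned     _gc_id;    // printed on both the start and the end line
      std::clock_t _start;
    public:
      PhaseLogger(const char* phase, unsigned gc_id)
          : _phase(phase), _gc_id(gc_id), _start(std::clock()) {
        std::printf("GC#%u [concurrent-%s-start]\n", _gc_id, _phase);
      }
      ~PhaseLogger() {        // runs however the phase scope is left
        double secs = double(std::clock() - _start) / CLOCKS_PER_SEC;
        std::printf("GC#%u [concurrent-%s: %.3f secs]\n", _gc_id, _phase, secs);
      }
    };

A scope such as { PhaseLogger pa("mark", 42); /* ... marking work ... */ } mirrors the CMSPhaseAccounting pa(this, "mark", gc_id, ...) usages rewritten throughout the hunks below.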
- gch->gen_process_strong_roots(_cmsGen->level(), - true, // younger gens are roots - true, // activate StrongRootsScope - false, // not scavenging - SharedHeap::ScanningOption(roots_scanning_options()), - ¬Older, - true, // walk all of code cache if (so & SO_CodeCache) - NULL, - &klass_closure); + gch->gen_process_roots(_cmsGen->level(), + true, // younger gens are roots + true, // activate StrongRootsScope + SharedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + ¬Older, + NULL, + &cld_closure); } } @@ -3802,7 +3808,7 @@ CMSTokenSyncWithLocks ts(true, bitMapLock()); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); + CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails); res = markFromRootsWork(asynch); if (res) { _collectorState = Precleaning; @@ -4205,7 +4211,7 @@ pst->all_tasks_completed(); } -class Par_ConcMarkingClosure: public CMSOopClosure { +class Par_ConcMarkingClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; CMSConcMarkingTask* _task; @@ -4218,7 +4224,7 @@ public: Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, CMSBitMap* bit_map, CMSMarkStack* overflow_stack): - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _task(task), _span(collector->_span), @@ -4525,7 +4531,7 @@ _start_sampling = false; } TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); + CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails); preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); } CMSTokenSync x(true); // is cms thread @@ -4554,7 +4560,7 @@ // we will never do an actual abortable preclean cycle. if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); + CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails); // We need more smarts in the abortable preclean // loop below to deal with cases where allocation // in young gen is very very slow, and our precleaning @@ -4699,7 +4705,7 @@ GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases rp->preclean_discovered_references( rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl, - gc_timer); + gc_timer, _gc_tracer_cm->gc_id()); } if (clean_survivor) { // preclean the active survivor space(s) @@ -4989,7 +4995,7 @@ } class PrecleanKlassClosure : public KlassClosure { - CMKlassClosure _cm_klass_closure; + KlassToOopClosure _cm_klass_closure; public: PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} void do_klass(Klass* k) { @@ -5042,7 +5048,7 @@ // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) int level = _cmsGen->level() - 1; if (level >= 0) { gch->do_collection(true, // full (i.e. 
force, see below) @@ -5071,7 +5077,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, bool init_mark_was_synchronous) { - NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);) + NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -5126,11 +5132,11 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { - GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); do_remark_parallel(); } else { GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, - _gc_timer_cm); + _gc_timer_cm, _gc_tracer_cm->gc_id()); do_remark_non_parallel(); } } @@ -5143,7 +5149,7 @@ verify_overflow_empty(); { - NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) + NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) refProcessingWork(asynch, clear_all_soft_refs); } verify_work_stacks_empty(); @@ -5227,7 +5233,6 @@ _timer.start(); GenCollectedHeap* gch = GenCollectedHeap::heap(); Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap)); - CMKlassClosure klass_closure(&par_mri_cl); // ---------- young gen roots -------------- { @@ -5243,17 +5248,19 @@ // ---------- remaining roots -------------- _timer.reset(); _timer.start(); - gch->gen_process_strong_roots(_collector->_cmsGen->level(), - false, // yg was scanned above - false, // this is parallel code - false, // not scavenging - SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), - &par_mri_cl, - true, // walk all of code cache if (so & SO_CodeCache) - NULL, - &klass_closure); + + CLDToOopClosure cld_closure(&par_mri_cl, true); + + gch->gen_process_roots(_collector->_cmsGen->level(), + false, // yg was scanned above + false, // this is parallel code + SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), + _collector->should_unload_classes(), + &par_mri_cl, + NULL, + &cld_closure); assert(_collector->should_unload_classes() - || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), + || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); if (PrintCMSStatistics != 0) { @@ -5303,7 +5310,7 @@ }; class RemarkKlassClosure : public KlassClosure { - CMKlassClosure _cm_klass_closure; + KlassToOopClosure _cm_klass_closure; public: RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} void do_klass(Klass* k) { @@ -5380,17 +5387,17 @@ // ---------- remaining roots -------------- _timer.reset(); _timer.start(); - gch->gen_process_strong_roots(_collector->_cmsGen->level(), - false, // yg was scanned above - false, // this is parallel code - false, // not scavenging - SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), - &par_mrias_cl, - true, // walk all of code cache if (so & SO_CodeCache) - NULL, - NULL); // The dirty klasses will be handled below + gch->gen_process_roots(_collector->_cmsGen->level(), + false, // yg 
was scanned above + false, // this is parallel code + SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), + _collector->should_unload_classes(), + &par_mrias_cl, + NULL, + NULL); // The dirty klasses will be handled below + assert(_collector->should_unload_classes() - || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), + || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); if (PrintCMSStatistics != 0) { @@ -5443,7 +5450,7 @@ // We might have added oops to ClassLoaderData::_handles during the // concurrent marking phase. These oops point to newly allocated objects // that are guaranteed to be kept alive. Either by the direct allocation - // code, or when the young collector processes the strong roots. Hence, + // code, or when the young collector processes the roots. Hence, // we don't have to revisit the _handles block during the remark phase. // ---------- rescan dirty cards ------------ @@ -5865,7 +5872,7 @@ cms_space, n_workers, workers, task_queues()); - // Set up for parallel process_strong_roots work. + // Set up for parallel process_roots work. gch->set_par_threads(n_workers); // We won't be iterating over the cards in the card table updating // the younger_gen cards, so we shouldn't call the following else @@ -5874,7 +5881,7 @@ // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel // The young gen rescan work will not be done as part of - // process_strong_roots (which currently doesn't knw how to + // process_roots (which currently doesn't know how to // parallelize such a scan), but rather will be broken up into // a set of parallel tasks (via the sampling that the [abortable] // preclean phase did of EdenSpace, plus the [two] tasks of @@ -5928,7 +5935,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -5965,29 +5972,29 @@ Universe::verify(); } { - GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
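Each rewritten call site above ends with the same invariant: either the code cache was scanned as part of the roots (SO_AllCodeCache), or class unloading is enabled and nmethods holding expired weak oops may legitimately be dropped. A small sketch of the check, with an invented flag value:

    #include <cassert>

    enum { SO_AllCodeCache_bit = 0x1 };   // stand-in for SharedHeap::SO_AllCodeCache

    void check_code_cache_invariant(bool should_unload_classes, int scan_options) {
      // One of the two must hold, otherwise dead nmethods could be missed.
      assert((should_unload_classes ||
              (scan_options & SO_AllCodeCache_bit) != 0) &&
             "must be ready to drop nmethods with expired weak oops");
    }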
GenCollectedHeap::StrongRootsScope srs(gch); - gch->gen_process_strong_roots(_cmsGen->level(), - true, // younger gens as roots - false, // use the local StrongRootsScope - false, // not scavenging - SharedHeap::ScanningOption(roots_scanning_options()), - &mrias_cl, - true, // walk code active on stacks - NULL, - NULL); // The dirty klasses will be handled below + + gch->gen_process_roots(_cmsGen->level(), + true, // younger gens as roots + false, // use the local StrongRootsScope + SharedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + &mrias_cl, + NULL, + NULL); // The dirty klasses will be handled below assert(should_unload_classes() - || (roots_scanning_options() & SharedHeap::SO_CodeCache), + || (roots_scanning_options() & SharedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); } { - GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); @@ -6006,7 +6013,7 @@ } { - GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); @@ -6019,7 +6026,7 @@ // We might have added oops to ClassLoaderData::_handles during the // concurrent marking phase. These oops point to newly allocated objects // that are guaranteed to be kept alive. Either by the direct allocation - // code, or when the young collector processes the strong roots. Hence, + // code, or when the young collector processes the roots. Hence, // we don't have to revisit the _handles block during the remark phase. verify_work_stacks_empty(); @@ -6074,6 +6081,8 @@ }; void CMSRefProcTaskProxy::work(uint worker_id) { + ResourceMark rm; + HandleMark hm; assert(_collector->_span.equals(_span), "Inconsistency in _span"); CMSParKeepAliveClosure par_keep_alive(_collector, _span, _mark_bit_map, @@ -6208,7 +6217,7 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { @@ -6233,13 +6242,15 @@ &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, &task_executor, - _gc_timer_cm); + _gc_timer_cm, + _gc_tracer_cm->gc_id()); } else { stats = rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, NULL, - _gc_timer_cm); + _gc_timer_cm, + _gc_tracer_cm->gc_id()); } _gc_tracer_cm->report_gc_reference_stats(stats); @@ -6250,7 +6261,7 @@ if (should_unload_classes()) { { - GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); @@ -6263,19 +6274,18 @@ } { - GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } - } - - // CMS doesn't use the StringTable as hard roots when class unloading is turned off. 
- // Need to check if we really scanned the StringTable. - if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { - GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); - // Delete entries for dead interned strings. - StringTable::unlink(&_is_alive_closure); - } + + { + GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); + // Delete entries for dead interned strings. + StringTable::unlink(&_is_alive_closure); + } + } + // Restore any preserved marks as a result of mark stack or // work queue overflow @@ -6339,7 +6349,7 @@ _intra_sweep_timer.start(); if (asynch) { TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); + CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), @@ -6560,7 +6570,7 @@ // Clear the mark bitmap (no grey objects to start with) // for the next cycle. TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); + CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails); HeapWord* curAddr = _markBitMap.startWord(); while (curAddr < _markBitMap.endWord()) { @@ -6626,7 +6636,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id()); TraceCollectorStats tcs(counters()); switch (op) { @@ -7744,7 +7754,7 @@ CMSCollector* collector, MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, CMSMarkStack* mark_stack): - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _span(span), _verification_bm(verification_bm), @@ -7797,7 +7807,7 @@ MemRegion span, CMSBitMap* bitMap, CMSMarkStack* markStack, HeapWord* finger, MarkFromRootsClosure* parent) : - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _span(span), _bitMap(bitMap), @@ -7814,7 +7824,7 @@ HeapWord* finger, HeapWord** global_finger_addr, Par_MarkFromRootsClosure* parent) : - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _whole_span(collector->_span), _span(span), @@ -7863,11 +7873,6 @@ _overflow_stack->expand(); // expand the stack if possible } -void CMKlassClosure::do_klass(Klass* k) { - assert(_oop_closure != NULL, "Not initialized?"); - k->oops_do(_oop_closure); -} - void PushOrMarkClosure::do_oop(oop obj) { // Ignore mark word because we are running concurrent with mutators. 
assert(obj->is_oop_or_null(true), "expected an oop or NULL"); @@ -7965,7 +7970,7 @@ CMSBitMap* mod_union_table, CMSMarkStack* mark_stack, bool concurrent_precleaning): - CMSOopClosure(rp), + MetadataAwareOopClosure(rp), _collector(collector), _span(span), _bit_map(bit_map), @@ -8038,7 +8043,7 @@ ReferenceProcessor* rp, CMSBitMap* bit_map, OopTaskQueue* work_queue): - CMSOopClosure(rp), + MetadataAwareOopClosure(rp), _collector(collector), _span(span), _bit_map(bit_map), --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,7 @@ #include "gc_implementation/shared/generationCounters.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/generation.hpp" +#include "memory/iterator.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp" #include "services/memoryService.hpp" @@ -1289,7 +1290,6 @@ void save_sweep_limit(); // More iteration support - virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); virtual void oop_iterate(ExtendedOopClosure* cl); virtual void safe_object_iterate(ObjectClosure* cl); virtual void object_iterate(ObjectClosure* cl); @@ -1387,13 +1387,6 @@ // Closures of various sorts used by CMS to accomplish its work // -// This closure is used to check that a certain set of oops is empty. -class FalseClosure: public OopClosure { - public: - void do_oop(oop* p) { guarantee(false, "Should be an empty set"); } - void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); } -}; - // This closure is used to do concurrent marking from the roots // following the first checkpoint. class MarkFromRootsClosure: public BitMapClosure { @@ -1458,7 +1451,7 @@ // The following closures are used to do certain kinds of verification of // CMS marking. -class PushAndMarkVerifyClosure: public CMSOopClosure { +class PushAndMarkVerifyClosure: public MetadataAwareOopClosure { CMSCollector* _collector; MemRegion _span; CMSBitMap* _verification_bm; @@ -1511,6 +1504,19 @@ } }; +// A version of ObjectClosure with "memory" (see _previous_address below) +class UpwardsObjectClosure: public BoolObjectClosure { + HeapWord* _previous_address; + public: + UpwardsObjectClosure() : _previous_address(NULL) { } + void set_previous(HeapWord* addr) { _previous_address = addr; } + HeapWord* previous() { return _previous_address; } + // A return value of "true" can be used by the caller to decide + // if this object's end should *NOT* be recorded in + // _previous_address above. + virtual bool do_object_bm(oop obj, MemRegion mr) = 0; +}; + // This closure is used during the second checkpointing phase // to rescan the marked objects on the dirty cards in the mod // union table and the card table proper. It's invoked via --- ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -50,8 +50,12 @@ void VM_CMS_Operation::acquire_pending_list_lock() { // The caller may block while communicating // with the SLT thread in order to acquire/release the PLL. 
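The vmCMSOperations hunk that follows hardens VM_CMS_Operation::acquire_pending_list_lock(): instead of dereferencing ConcurrentMarkSweepThread::slt() unconditionally, it fetches the thread once, uses it if present, and otherwise reports the missing surrogate locker. A generic sketch of that null-guard shape (stand-in types, not the HotSpot API):

    #include <cstddef>
    #include <cstdio>

    struct SurrogateLocker {            // stand-in for SurrogateLockerThread
      void acquire_pll() { /* communicate with the locker thread */ }
    };

    SurrogateLocker* current_slt() {    // may be NULL before thread creation
      return NULL;
    }

    void acquire_pending_list_lock_sketch() {
      SurrogateLocker* slt = current_slt();
      if (slt != NULL) {
        slt->acquire_pll();             // normal path
      } else {
        // Report instead of crashing on a NULL dereference.
        std::fprintf(stderr, "surrogate locker thread missing\n");
      }
    }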
- ConcurrentMarkSweepThread::slt()-> - manipulatePLL(SurrogateLockerThread::acquirePLL); + SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt(); + if (slt != NULL) { + slt->manipulatePLL(SurrogateLockerThread::acquirePLL); + } else { + SurrogateLockerThread::report_missing_slt(); + } } void VM_CMS_Operation::release_and_notify_pending_list_lock() { @@ -64,7 +68,7 @@ void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm); + GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id()); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -76,7 +80,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm); + GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id()); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/bufferingOopClosure.hpp" +#include "memory/iterator.hpp" +#include "utilities/debug.hpp" + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TestBufferingOopClosure { + + // Helper class to fake a set of oop*s and narrowOop*s. + class FakeRoots { + public: + // Used for sanity checking of the values passed to the do_oops functions in the test. 
+ static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord -1);
+
+ int _num_narrow;
+ int _num_full;
+ void** _narrow;
+ void** _full;
+
+ FakeRoots(int num_narrow, int num_full) :
+ _num_narrow(num_narrow),
+ _num_full(num_full),
+ _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
+ _full((void**)::malloc(sizeof(void*) * num_full)) {
+
+ for (int i = 0; i < num_narrow; i++) {
+ _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
+ }
+ for (int i = 0; i < num_full; i++) {
+ _full[i] = (void*)(uintptr_t)i;
+ }
+ }
+
+ ~FakeRoots() {
+ ::free(_narrow);
+ ::free(_full);
+ }
+
+ void oops_do_narrow_then_full(OopClosure* cl) {
+ for (int i = 0; i < _num_narrow; i++) {
+ cl->do_oop((narrowOop*)_narrow[i]);
+ }
+ for (int i = 0; i < _num_full; i++) {
+ cl->do_oop((oop*)_full[i]);
+ }
+ }
+
+ void oops_do_full_then_narrow(OopClosure* cl) {
+ for (int i = 0; i < _num_full; i++) {
+ cl->do_oop((oop*)_full[i]);
+ }
+ for (int i = 0; i < _num_narrow; i++) {
+ cl->do_oop((narrowOop*)_narrow[i]);
+ }
+ }
+
+ void oops_do_mixed(OopClosure* cl) {
+ int i;
+ for (i = 0; i < _num_full && i < _num_narrow; i++) {
+ cl->do_oop((oop*)_full[i]);
+ cl->do_oop((narrowOop*)_narrow[i]);
+ }
+ for (int j = i; j < _num_full; j++) {
+ cl->do_oop((oop*)_full[j]);
+ }
+ for (int j = i; j < _num_narrow; j++) {
+ cl->do_oop((narrowOop*)_narrow[j]);
+ }
+ }
+
+ static const int MaxOrder = 2;
+
+ void oops_do(OopClosure* cl, int do_oop_order) {
+ switch(do_oop_order) {
+ case 0:
+ oops_do_narrow_then_full(cl);
+ break;
+ case 1:
+ oops_do_full_then_narrow(cl);
+ break;
+ case 2:
+ oops_do_mixed(cl);
+ break;
+ default:
+ oops_do_narrow_then_full(cl);
+ break;
+ }
+ }
+ };
+
+ class CountOopClosure : public OopClosure {
+ int _narrow_oop_count;
+ int _full_oop_count;
+ public:
+ CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
+ void do_oop(narrowOop* p) {
+ assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) != 0,
+ "The narrowOop was unexpectedly not marked with the NarrowOopMarker");
+ _narrow_oop_count++;
+ }
+
+ void do_oop(oop* p){
+ assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) == 0,
+ "The oop was unexpectedly marked with the NarrowOopMarker");
+ _full_oop_count++;
+ }
+
+ int narrow_oop_count() { return _narrow_oop_count; }
+ int full_oop_count() { return _full_oop_count; }
+ int all_oop_count() { return _narrow_oop_count + _full_oop_count; }
+ };
+
+ class DoNothingOopClosure : public OopClosure {
+ public:
+ void do_oop(narrowOop* p) {}
+ void do_oop(oop* p) {}
+ };
+
+ static void testCount(int num_narrow, int num_full, int do_oop_order) {
+ FakeRoots fr(num_narrow, num_full);
+
+ CountOopClosure coc;
+ BufferingOopClosure boc(&coc);
+
+ fr.oops_do(&boc, do_oop_order);
+
+ boc.done();
+
+ #define assert_testCount(got, expected) \
+ assert((got) == (expected), \
+ err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
+ (got), (expected), num_narrow, num_full, do_oop_order))
+
+ assert_testCount(num_narrow, coc.narrow_oop_count());
+ assert_testCount(num_full, coc.full_oop_count());
+ assert_testCount(num_narrow + num_full, coc.all_oop_count());
+ }
+
+ static void testCount() {
+ int buffer_length = BufferingOopClosure::BufferLength;
+
+ for (int order = 0; order < FakeRoots::MaxOrder; order++) {
+ testCount(0, 0, order);
+ testCount(10, 0, order);
+ testCount(0, 10, order);
+ testCount(10, 10, order);
+ testCount(buffer_length, 10, order);
+ testCount(10, buffer_length, order);
+ testCount(buffer_length, buffer_length, order);
+ testCount(buffer_length + 1,
10, order); + testCount(10, buffer_length + 1, order); + testCount(buffer_length + 1, buffer_length, order); + testCount(buffer_length, buffer_length + 1, order); + testCount(buffer_length + 1, buffer_length + 1, order); + } + } + + static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) { + FakeRoots fr(num_narrow, num_full); + + DoNothingOopClosure cl; + BufferingOopClosure boc(&cl); + + fr.oops_do(&boc, 0); + + #define assert_testIsBufferEmptyOrFull(got, expected) \ + assert((got) == (expected), \ + err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \ + (got), (expected), num_narrow, num_full, \ + BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full))) + + assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty()); + assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full()); + } + + static void testIsBufferEmptyOrFull() { + int bl = BufferingOopClosure::BufferLength; + + testIsBufferEmptyOrFull(0, 0, true, false); + testIsBufferEmptyOrFull(1, 0, false, false); + testIsBufferEmptyOrFull(0, 1, false, false); + testIsBufferEmptyOrFull(1, 1, false, false); + testIsBufferEmptyOrFull(10, 0, false, false); + testIsBufferEmptyOrFull(0, 10, false, false); + testIsBufferEmptyOrFull(10, 10, false, false); + testIsBufferEmptyOrFull(0, bl, false, true); + testIsBufferEmptyOrFull(bl, 0, false, true); + testIsBufferEmptyOrFull(bl/2, bl/2, false, true); + testIsBufferEmptyOrFull(bl-1, 1, false, true); + testIsBufferEmptyOrFull(1, bl-1, false, true); + // Processed + testIsBufferEmptyOrFull(bl+1, 0, false, false); + testIsBufferEmptyOrFull(bl*2, 0, false, true); + } + + static void testEmptyAfterDone(int num_narrow, int num_full) { + FakeRoots fr(num_narrow, num_full); + + DoNothingOopClosure cl; + BufferingOopClosure boc(&cl); + + fr.oops_do(&boc, 0); + + // Make sure all get processed. + boc.done(); + + assert(boc.is_buffer_empty(), + err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)", + num_narrow, num_full)); + } + + static void testEmptyAfterDone() { + int bl = BufferingOopClosure::BufferLength; + + testEmptyAfterDone(0, 0); + testEmptyAfterDone(1, 0); + testEmptyAfterDone(0, 1); + testEmptyAfterDone(1, 1); + testEmptyAfterDone(10, 0); + testEmptyAfterDone(0, 10); + testEmptyAfterDone(10, 10); + testEmptyAfterDone(0, bl); + testEmptyAfterDone(bl, 0); + testEmptyAfterDone(bl/2, bl/2); + testEmptyAfterDone(bl-1, 1); + testEmptyAfterDone(1, bl-1); + // Processed + testEmptyAfterDone(bl+1, 0); + testEmptyAfterDone(bl*2, 0); + } + + public: + static void test() { + testCount(); + testIsBufferEmptyOrFull(); + testEmptyAfterDone(); + } +}; + +void TestBufferingOopClosure_test() { + TestBufferingOopClosure::test(); +} + +#endif --- ./hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
-#include "memory/genOopClosures.hpp"
-#include "memory/generation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/os.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/debug.hpp"
 // A BufferingOops closure tries to separate out the cost of finding roots
 // from the cost of applying closures to them. It maintains an array of
@@ -41,60 +41,103 @@
 // The caller must be sure to call "done" to process any unprocessed
 // buffered entries.
-class Generation;
-class HeapRegion;
+class BufferingOopClosure: public OopClosure {
+ friend class TestBufferingOopClosure;
+protected:
+ static const size_t BufferLength = 1024;
-class BufferingOopClosure: public OopClosure {
-protected:
- enum PrivateConstants {
- BufferLength = 1024
- };
-
- StarTask _buffer[BufferLength];
- StarTask* _buffer_top;
- StarTask* _buffer_curr;
+ // We need to know if the buffered addresses contain oops or narrowOops.
+ // We can't tag the addresses the way StarTask does, because we need to
+ // be able to handle unaligned addresses coming from oops embedded in code.
+ //
+ // The addresses for the full-sized oops are filled in from the bottom,
+ // while the addresses for the narrowOops are filled in from the top.
+ OopOrNarrowOopStar _buffer[BufferLength];
+ OopOrNarrowOopStar* _oop_top;
+ OopOrNarrowOopStar* _narrowOop_bottom;
 OopClosure* _oc;
 double _closure_app_seconds;
- void process_buffer () {
+
+ bool is_buffer_empty() {
+ return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
+ }
+
+ bool is_buffer_full() {
+ return _narrowOop_bottom < _oop_top;
+ }
+
+ // Process addresses containing full-sized oops.
+ void process_oops() {
+ for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
+ _oc->do_oop((oop*)(*curr));
+ }
+ _oop_top = _buffer;
+ }
+
+ // Process addresses containing narrow oops.
+ void process_narrowOops() {
+ for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
+ _oc->do_oop((narrowOop*)(*curr));
+ }
+ _narrowOop_bottom = _buffer + BufferLength - 1;
+ }
+
+ // Apply the closure to all oops and clear the buffer.
+ // Accumulate the time it took.
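The rewritten BufferingOopClosure above replaces StarTask tagging with a two-ended buffer: full-width oop* addresses fill the array from index 0 upward, narrowOop* addresses from the last slot downward, and the buffer is full exactly when the two cursors cross. A cut-down sketch of that protocol with a generic element type:

    #include <cassert>
    #include <cstddef>

    template <size_t N>
    class TwoEndedBuffer {
      void*  _buf[N];
      size_t _full_top;        // next free slot for full-width entries (grows up)
      size_t _narrow_bottom;   // next free slot for narrow entries (grows down)
    public:
      TwoEndedBuffer() : _full_top(0), _narrow_bottom(N - 1) {}
      bool is_empty() const { return _full_top == 0 && _narrow_bottom == N - 1; }
      // Full exactly when the upward and downward cursors have crossed.
      bool is_full()  const { return _narrow_bottom < _full_top; }
      void push_full(void* p)   { assert(!is_full()); _buf[_full_top++]      = p; }
      void push_narrow(void* p) { assert(!is_full()); _buf[_narrow_bottom--] = p; }
    };

This sidesteps StarTask's low-bit tagging, which, per the comment above, cannot represent the unaligned addresses of oops embedded in compiled code.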
+ void process_buffer() { double start = os::elapsedTime(); - for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) { - if (curr->is_narrow()) { - assert(UseCompressedOops, "Error"); - _oc->do_oop((narrowOop*)(*curr)); - } else { - _oc->do_oop((oop*)(*curr)); - } - } - _buffer_curr = _buffer; + + process_oops(); + process_narrowOops(); + _closure_app_seconds += (os::elapsedTime() - start); } - template inline void do_oop_work(T* p) { - if (_buffer_curr == _buffer_top) { + void process_buffer_if_full() { + if (is_buffer_full()) { process_buffer(); } - StarTask new_ref(p); - *_buffer_curr = new_ref; - ++_buffer_curr; + } + + void add_narrowOop(narrowOop* p) { + assert(!is_buffer_full(), "Buffer should not be full"); + *_narrowOop_bottom = (OopOrNarrowOopStar)p; + _narrowOop_bottom--; + } + + void add_oop(oop* p) { + assert(!is_buffer_full(), "Buffer should not be full"); + *_oop_top = (OopOrNarrowOopStar)p; + _oop_top++; } public: - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { + process_buffer_if_full(); + add_narrowOop(p); + } - void done () { - if (_buffer_curr > _buffer) { + virtual void do_oop(oop* p) { + process_buffer_if_full(); + add_oop(p); + } + + void done() { + if (!is_buffer_empty()) { process_buffer(); } } - double closure_app_seconds () { + + double closure_app_seconds() { return _closure_app_seconds; } - BufferingOopClosure (OopClosure *oc) : + + BufferingOopClosure(OopClosure *oc) : _oc(oc), - _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength), + _oop_top(_buffer), + _narrowOop_bottom(_buffer + BufferLength - 1), _closure_app_seconds(0.0) { } }; --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,7 +29,7 @@ #include "gc_implementation/g1/g1HotCardCache.hpp" #include "runtime/java.hpp" -ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) : +ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure) : _threads(NULL), _n_threads(0), _hot_card_cache(g1h) { @@ -61,7 +61,7 @@ ConcurrentG1RefineThread *next = NULL; for (uint i = _n_threads - 1; i != UINT_MAX; i--) { - ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i); + ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, refine_closure, worker_id_offset, i); assert(t != NULL, "Conc refine should have been created"); if (t->osthread() == NULL) { vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread"); @@ -81,8 +81,8 @@ } } -void ConcurrentG1Refine::init() { - _hot_card_cache.initialize(); +void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) { + _hot_card_cache.initialize(card_counts_storage); } void ConcurrentG1Refine::stop() { @@ -128,9 +128,7 @@ } uint ConcurrentG1Refine::thread_num() { - uint n_threads = (G1ConcRefinementThreads > 0) ? 
G1ConcRefinementThreads - : ParallelGCThreads; - return MAX2(n_threads, 1); + return G1ConcRefinementThreads; } void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const { --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -34,6 +34,7 @@ class ConcurrentG1RefineThread; class G1CollectedHeap; class G1HotCardCache; +class G1RegionToSpaceMapper; class G1RemSet; class DirtyCardQueue; @@ -71,10 +72,10 @@ void reset_threshold_step(); public: - ConcurrentG1Refine(G1CollectedHeap* g1h); + ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure); ~ConcurrentG1Refine(); - void init(); // Accomplish some initialization that has to wait. + void init(G1RegionToSpaceMapper* card_counts_storage); void stop(); void reinitialize_threads(); --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,8 +33,10 @@ ConcurrentG1RefineThread:: ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next, + CardTableEntryClosure* refine_closure, uint worker_id_offset, uint worker_id) : ConcurrentGCThread(), + _refine_closure(refine_closure), _worker_id_offset(worker_id_offset), _worker_id(worker_id), _active(false), @@ -71,6 +73,7 @@ } void ConcurrentG1RefineThread::sample_young_list_rs_lengths() { + SuspendibleThreadSetJoiner sts; G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectorPolicy* g1p = g1h->g1_policy(); if (g1p->adaptive_young_list_length()) { @@ -82,8 +85,8 @@ // we try to yield every time we visit 10 regions if (regions_visited == 10) { - if (_sts.should_yield()) { - _sts.yield("G1 refine"); + if (sts.should_yield()) { + sts.yield(); // we just abandon the iteration break; } @@ -99,9 +102,7 @@ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); _vtime_start = os::elapsedVTime(); while(!_should_terminate) { - _sts.join(); sample_young_list_rs_lengths(); - _sts.leave(); if (os::supports_vtime()) { _vtime_accum = (os::elapsedVTime() - _vtime_start); @@ -182,37 +183,37 @@ break; } - _sts.join(); + { + SuspendibleThreadSetJoiner sts; - do { - int curr_buffer_num = (int)dcqs.completed_buffers_num(); - // If the number of the buffers falls down into the yellow zone, - // that means that the transition period after the evacuation pause has ended. - if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) { - dcqs.set_completed_queue_padding(0); + do { + int curr_buffer_num = (int)dcqs.completed_buffers_num(); + // If the number of the buffers falls down into the yellow zone, + // that means that the transition period after the evacuation pause has ended. + if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) { + dcqs.set_completed_queue_padding(0); + } + + if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) { + // If the number of the buffer has fallen below our threshold + // we should deactivate. The predecessor will reactivate this + // thread should the number of the buffers cross the threshold again. + deactivate(); + break; + } + + // Check if we need to activate the next thread. 
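The checks that follow implement a stepped activation chain: each refinement worker has its own threshold, an active worker wakes its successor when the backlog of completed buffers outgrows the successor's threshold, and every worker other than worker 0 parks itself once the backlog falls back to its own deactivation threshold. A rough standalone simulation of that policy (all thresholds and names here are hypothetical, not the values G1 derives from the green/yellow zones):

    #include <cstdio>

    const int kWorkers = 4;

    int activation_threshold(int i)   { return 6 + 2 * i; } // hypothetical step
    int deactivation_threshold(int i) { return activation_threshold(i) - 1; }

    void adjust(bool active[], int backlog) {
      // An active worker wakes an inactive successor (mirrors _next->activate()).
      for (int i = 0; i + 1 < kWorkers; i++) {
        if (active[i] && !active[i + 1] && backlog > activation_threshold(i + 1))
          active[i + 1] = true;
      }
      // Workers other than 0 park when the backlog drops far enough
      // (mirrors the deactivate() branch above).
      for (int i = kWorkers - 1; i > 0; i--) {
        if (active[i] && backlog <= deactivation_threshold(i))
          active[i] = false;
      }
    }

    int main() {
      bool active[kWorkers] = { true, false, false, false };
      int backlogs[] = { 2, 9, 13, 7, 3 };
      for (int k = 0; k < 5; k++) {
        adjust(active, backlogs[k]);
        printf("backlog=%2d active: %d %d %d %d\n",
               backlogs[k], active[0], active[1], active[2], active[3]);
      }
      return 0;
    }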
+ if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) { + _next->activate(); + } + } while (dcqs.apply_closure_to_completed_buffer(_refine_closure, _worker_id + _worker_id_offset, cg1r()->green_zone())); + + // We can exit the loop above while being active if there was a yield request. + if (is_active()) { + deactivate(); } - - if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) { - // If the number of the buffer has fallen below our threshold - // we should deactivate. The predecessor will reactivate this - // thread should the number of the buffers cross the threshold again. - deactivate(); - break; - } - - // Check if we need to activate the next thread. - if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) { - _next->activate(); - } - } while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone())); - - // We can exit the loop above while being active if there was a yield request. - if (is_active()) { - deactivate(); } - _sts.leave(); - if (os::supports_vtime()) { _vtime_accum = (os::elapsedVTime() - _vtime_start); } else { @@ -223,17 +224,6 @@ terminate(); } - -void ConcurrentG1RefineThread::yield() { - if (G1TraceConcRefinement) { - gclog_or_tty->print_cr("G1-Refine-yield"); - } - _sts.yield("G1 refine"); - if (G1TraceConcRefinement) { - gclog_or_tty->print_cr("G1-Refine-yield-end"); - } -} - void ConcurrentG1RefineThread::stop() { // it is ok to take late safepoints here, if needed { --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/concurrentGCThread.hpp" // Forward Decl. +class CardTableEntryClosure; class ConcurrentG1Refine; // The G1 Concurrent Refinement Thread (could be several in the future). @@ -49,6 +50,9 @@ Monitor* _monitor; ConcurrentG1Refine* _cg1r; + // The closure applied to completed log buffers. + CardTableEntryClosure* _refine_closure; + int _thread_threshold_step; // This thread activation threshold int _threshold; @@ -64,13 +68,11 @@ void activate(); void deactivate(); - // For use by G1CollectedHeap, which is a friend. 
- static SuspendibleThreadSet* sts() { return &_sts; } - public: virtual void run(); // Constructor ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next, + CardTableEntryClosure* refine_closure, uint worker_id_offset, uint worker_id); void initialize(); @@ -84,8 +86,6 @@ ConcurrentG1Refine* cg1r() { return _cg1r; } - // Yield for GC - void yield(); // shutdown void stop(); }; --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,7 +23,9 @@ */ #include "precompiled.hpp" +#include "classfile/metadataOnStackMark.hpp" #include "classfile/symbolTable.hpp" +#include "code/codeCache.hpp" #include "gc_implementation/g1/concurrentMark.inline.hpp" #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" @@ -33,18 +35,21 @@ #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" +#include "memory/allocation.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" +#include "runtime/prefetch.inline.hpp" #include "services/memTracker.hpp" // Concurrent marking bit map wrapper @@ -56,8 +61,8 @@ _bmWordSize = 0; } -HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, - HeapWord* limit) const { +HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, + const HeapWord* limit) const { // First we must round addr *up* to a possible object boundary. 
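The rounding named by this comment is done by align_size_up() in the next statement; for a power-of-two alignment it is the usual add-then-mask idiom. A quick self-contained check (align_up is a hypothetical helper name):

    #include <cstdint>
    #include <cassert>

    // align_up(x, n): smallest multiple of n that is >= x; n a power of two.
    static uintptr_t align_up(uintptr_t x, uintptr_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    int main() {
      assert(align_up(0x1001, 0x10) == 0x1010); // rounds up to the boundary
      assert(align_up(0x1010, 0x10) == 0x1010); // already aligned: unchanged
      return 0;
    }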
addr = (HeapWord*)align_size_up((intptr_t)addr, HeapWordSize << _shifter); @@ -74,8 +79,8 @@ return nextAddr; } -HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, - HeapWord* limit) const { +HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr, + const HeapWord* limit) const { size_t addrOffset = heapWordToOffset(addr); if (limit == NULL) { limit = _bmStartWord + _bmWordSize; @@ -95,12 +100,12 @@ } #ifndef PRODUCT -bool CMBitMapRO::covers(ReservedSpace heap_rs) const { +bool CMBitMapRO::covers(MemRegion heap_rs) const { // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, "size inconsistency"); - return _bmStartWord == (HeapWord*)(heap_rs.base()) && - _bmWordSize == heap_rs.size()>>LogHeapWordSize; + return _bmStartWord == (HeapWord*)(heap_rs.start()) && + _bmWordSize == heap_rs.word_size(); } #endif @@ -108,33 +113,76 @@ _bm.print_on_error(st, prefix); } -bool CMBitMap::allocate(ReservedSpace heap_rs) { - _bmStartWord = (HeapWord*)(heap_rs.base()); - _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes - ReservedSpace brs(ReservedSpace::allocation_align_size_up( - (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); - if (!brs.is_reserved()) { - warning("ConcurrentMark marking bit map allocation failure"); +size_t CMBitMap::compute_size(size_t heap_size) { + return heap_size / mark_distance(); +} + +size_t CMBitMap::mark_distance() { + return MinObjAlignmentInBytes * BitsPerByte; +} + +void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) { + _bmStartWord = heap.start(); + _bmWordSize = heap.word_size(); + + _bm.set_map((BitMap::bm_word_t*) storage->reserved().start()); + _bm.set_size(_bmWordSize >> _shifter); + + storage->set_mapping_changed_listener(&_listener); +} + +void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) { + if (zero_filled) { + return; + } + // We need to clear the bitmap on commit, removing any existing information. + MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords); + _bm->clearRange(mr); +} + +// Closure used for clearing the given mark bitmap. +class ClearBitmapHRClosure : public HeapRegionClosure { + private: + ConcurrentMark* _cm; + CMBitMap* _bitmap; + bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration. + public: + ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { + assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield."); + } + + virtual bool doHeapRegion(HeapRegion* r) { + size_t const chunk_size_in_words = M / HeapWordSize; + + HeapWord* cur = r->bottom(); + HeapWord* const end = r->end(); + + while (cur < end) { + MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); + _bitmap->clearRange(mr); + + cur += chunk_size_in_words; + + // Abort iteration if after yielding the marking has been aborted. + if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) { + return true; + } + // Repeat the asserts from before the start of the closure. We will do them + // as asserts here to minimize their overhead on the product. However, we + // will have them as guarantees at the beginning / end of the bitmap + // clearing to get some checking in the product. 
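The loop this closure runs, clear one bounded chunk and then offer to yield, is the standard way to keep a long concurrent operation safepoint-friendly; the asserts it repeats follow below. Reduced to a standalone sketch, with should_yield() as a stand-in for the suspendible-thread-set check:

    #include <cstring>
    #include <cstddef>

    static bool should_yield() { return false; } // stand-in; never yields here

    // Clear len bytes in fixed-size chunks, checking for a pause request
    // between chunks so a safepoint never waits for the whole range.
    static bool clear_in_chunks(unsigned char* base, size_t len, size_t chunk) {
      for (size_t done = 0; done < len; done += chunk) {
        size_t n = (len - done < chunk) ? (len - done) : chunk;
        memset(base + done, 0, n);
        if (should_yield()) {
          return false; // abort; the caller restarts or gives up, as above
        }
      }
      return true;
    }

    int main() {
      static unsigned char bitmap[1 << 16];
      return clear_in_chunks(bitmap, sizeof(bitmap), 1024) ? 0 : 1;
    }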
+ assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant"); + assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant"); + } + return false; } - MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); - // For now we'll just commit all of the bit map up front. - // Later on we'll try to be more parsimonious with swap. - if (!_virtual_space.initialize(brs, brs.size())) { - warning("ConcurrentMark marking bit map backing store failure"); - return false; - } - assert(_virtual_space.committed_size() == brs.size(), - "didn't reserve backing store for all of concurrent marking bit map?"); - _bm.set_map((uintptr_t*)_virtual_space.low()); - assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= - _bmWordSize, "inconsistency in bit map sizing"); - _bm.set_size(_bmWordSize >> _shifter); - return true; -} +}; void CMBitMap::clearAll() { - _bm.clear(); + ClearBitmapHRClosure cl(NULL, this, false /* may_yield */); + G1CollectedHeap::heap()->heap_region_iterate(&cl); + guarantee(cl.complete(), "Must have completed iteration."); return; } @@ -389,10 +437,6 @@ } } -bool ConcurrentMark::not_yet_marked(oop obj) const { - return _g1h->is_obj_ill(obj); -} - CMRootRegions::CMRootRegions() : _young_list(NULL), _cm(NULL), _scan_in_progress(false), _should_abort(false), _next_survivor(NULL) { } @@ -479,10 +523,10 @@ return MAX2((n_par_threads + 2) / 4, 1U); } -ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) : +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) : _g1h(g1h), - _markBitMap1(log2_intptr(MinObjAlignment)), - _markBitMap2(log2_intptr(MinObjAlignment)), + _markBitMap1(), + _markBitMap2(), _parallel_marking_threads(0), _max_parallel_marking_threads(0), _sleep_factor(0.0), @@ -491,7 +535,7 @@ _cleanup_task_overhead(1.0), _cleanup_list("Cleanup List"), _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), - _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >> + _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >> CardTableModRefBS::card_shift, false /* in_resource_area*/), @@ -510,6 +554,7 @@ _has_overflown(false), _concurrent(false), _has_aborted(false), + _aborted_gc_id(GCId::undefined()), _restart_for_overflow(false), _concurrent_marking_in_progress(false), @@ -540,14 +585,8 @@ "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end)); } - if (!_markBitMap1.allocate(heap_rs)) { - warning("Failed to allocate first CM bit map"); - return; - } - if (!_markBitMap2.allocate(heap_rs)) { - warning("Failed to allocate second CM bit map"); - return; - } + _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage); + _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage); // Create & start a ConcurrentMark thread. 
_cmThread = new ConcurrentMarkThread(this); @@ -558,8 +597,8 @@ } assert(CGC_lock != NULL, "Where's the CGC_lock?"); - assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency"); - assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency"); + assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency"); + assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency"); SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); satb_qs.set_buffer_size(G1SATBBufferSize); @@ -719,38 +758,17 @@ clear_all_count_data(); // so that the call below can read a sensible value - _heap_start = (HeapWord*) heap_rs.base(); + _heap_start = g1h->reserved_region().start(); set_non_marking_state(); _completed_initialization = true; } -void ConcurrentMark::update_g1_committed(bool force) { - // If concurrent marking is not in progress, then we do not need to - // update _heap_end. - if (!concurrent_marking_in_progress() && !force) return; - - MemRegion committed = _g1h->g1_committed(); - assert(committed.start() == _heap_start, "start shouldn't change"); - HeapWord* new_end = committed.end(); - if (new_end > _heap_end) { - // The heap has been expanded. - - _heap_end = new_end; - } - // Notice that the heap can also shrink. However, this only happens - // during a Full GC (at least currently) and the entire marking - // phase will bail out and the task will not be restarted. So, let's - // do nothing. -} - void ConcurrentMark::reset() { // Starting values for these two. This should be called in a STW - // phase. CM will be notified of any future g1_committed expansions - // will be at the end of evacuation pauses, when tasks are - // inactive. - MemRegion committed = _g1h->g1_committed(); - _heap_start = committed.start(); - _heap_end = committed.end(); + // phase. + MemRegion reserved = _g1h->g1_reserved(); + _heap_start = reserved.start(); + _heap_end = reserved.end(); // Separated the asserts so that we know which one fires. assert(_heap_start != NULL, "heap bounds should look ok"); @@ -822,7 +840,6 @@ assert(out_of_regions(), err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT, p2i(_finger), p2i(_heap_end))); - update_g1_committed(true); } } @@ -841,7 +858,6 @@ void ConcurrentMark::clearNextBitmap() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - G1CollectorPolicy* g1p = g1h->g1_policy(); // Make sure that the concurrent mark thread looks to still be in // the current cycle. @@ -853,39 +869,47 @@ // is the case. guarantee(!g1h->mark_in_progress(), "invariant"); - // clear the mark bitmap (no grey objects to start with). - // We need to do this in chunks and offer to yield in between - // each chunk. - HeapWord* start = _nextMarkBitMap->startWord(); - HeapWord* end = _nextMarkBitMap->endWord(); - HeapWord* cur = start; - size_t chunkSize = M; - while (cur < end) { - HeapWord* next = cur + chunkSize; - if (next > end) { - next = end; - } - MemRegion mr(cur,next); - _nextMarkBitMap->clearRange(mr); - cur = next; - do_yield_check(); - - // Repeat the asserts from above. We'll do them as asserts here to - // minimize their overhead on the product. However, we'll have - // them as guarantees at the beginning / end of the bitmap - // clearing to get some checking in the product. - assert(cmThread()->during_cycle(), "invariant"); - assert(!g1h->mark_in_progress(), "invariant"); + ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */); + g1h->heap_region_iterate(&cl); + + // Clear the liveness counting data. 
If the marking has been aborted, the abort() + // call already did that. + if (cl.complete()) { + clear_all_count_data(); } - // Clear the liveness counting data - clear_all_count_data(); - // Repeat the asserts from above. guarantee(cmThread()->during_cycle(), "invariant"); guarantee(!g1h->mark_in_progress(), "invariant"); } +class CheckBitmapClearHRClosure : public HeapRegionClosure { + CMBitMap* _bitmap; + bool _error; + public: + CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) { + } + + virtual bool doHeapRegion(HeapRegion* r) { + // This closure can be called concurrently to the mutator, so we must make sure + // that the result of the getNextMarkedWordAddress() call is compared to the + // value passed to it as limit to detect any found bits. + // We can use the region's orig_end() for the limit and the comparison value + // as it always contains the "real" end of the region that never changes and + // has no side effects. + // Due to the latter, there can also be no problem with the compiler generating + // reloads of the orig_end() call. + HeapWord* end = r->orig_end(); + return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end; + } +}; + +bool ConcurrentMark::nextMarkBitmapIsClear() { + CheckBitmapClearHRClosure cl(_nextMarkBitMap); + _g1h->heap_region_iterate(&cl); + return cl.complete(); +} + class NoteStartOfMarkHRClosure: public HeapRegionClosure { public: bool doHeapRegion(HeapRegion* r) { @@ -976,13 +1000,13 @@ } if (concurrent()) { - ConcurrentGCThread::stsLeave(); + SuspendibleThreadSet::leave(); } bool barrier_aborted = !_first_overflow_barrier_sync.enter(); if (concurrent()) { - ConcurrentGCThread::stsJoin(); + SuspendibleThreadSet::join(); } // at this point everyone should have synced up and not be doing any // more work @@ -1019,8 +1043,7 @@ force_overflow()->update(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); } } @@ -1036,13 +1059,13 @@ } if (concurrent()) { - ConcurrentGCThread::stsLeave(); + SuspendibleThreadSet::leave(); } bool barrier_aborted = !_second_overflow_barrier_sync.enter(); if (concurrent()) { - ConcurrentGCThread::stsJoin(); + SuspendibleThreadSet::join(); } // at this point everything should be re-initialized and ready to go @@ -1094,7 +1117,7 @@ double start_vtime = os::elapsedVTime(); - ConcurrentGCThread::stsJoin(); + SuspendibleThreadSet::join(); assert(worker_id < _cm->active_tasks(), "invariant"); CMTask* the_task = _cm->task(worker_id); @@ -1102,46 +1125,32 @@ if (!_cm->has_aborted()) { do { double start_vtime_sec = os::elapsedVTime(); - double start_time_sec = os::elapsedTime(); double mark_step_duration_ms = G1ConcMarkStepDurationMillis; the_task->do_marking_step(mark_step_duration_ms, true /* do_termination */, false /* is_serial*/); - double end_time_sec = os::elapsedTime(); double end_vtime_sec = os::elapsedVTime(); double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; - double elapsed_time_sec = end_time_sec - start_time_sec; _cm->clear_has_overflown(); - bool ret = _cm->do_yield_check(worker_id); + _cm->do_yield_check(worker_id); jlong sleep_time_ms; if (!_cm->has_aborted() && the_task->has_aborted()) { sleep_time_ms = (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); - ConcurrentGCThread::stsLeave(); + SuspendibleThreadSet::leave(); os::sleep(Thread::current(), sleep_time_ms, false); - ConcurrentGCThread::stsJoin(); + 
SuspendibleThreadSet::join(); } - double end_time2_sec = os::elapsedTime(); - double elapsed_time2_sec = end_time2_sec - start_time_sec; - -#if 0 - gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " - "overhead %1.4lf", - elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, - the_task->conc_overhead(os::elapsedTime()) * 8.0); - gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", - elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); -#endif } while (!_cm->has_aborted() && the_task->has_aborted()); } the_task->record_end_time(); guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); - ConcurrentGCThread::stsLeave(); + SuspendibleThreadSet::leave(); double end_vtime = os::elapsedVTime(); _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); @@ -1221,6 +1230,9 @@ }; void ConcurrentMark::scanRootRegions() { + // Start of concurrent marking. + ClassLoaderDataGraph::clear_claimed_marks(); + // scan_in_progress() will have been set to true only if there was // at least one root region to scan. So, if it's false, we // should not attempt to do any further work. @@ -1269,7 +1281,7 @@ CMConcurrentMarkingTask markingTask(this, cmThread()); if (use_parallel_marking_threads()) { _parallel_workers->set_active_workers((int)active_workers); - // Don't set _n_par_threads because it affects MT in proceess_strong_roots() + // Don't set _n_par_threads because it affects MT in process_roots() // and the decisions on that MT processing is made elsewhere. assert(_parallel_workers->active_workers() > 0, "Should have been set"); _parallel_workers->run_task(&markingTask); @@ -1300,6 +1312,7 @@ Universe::verify(VerifyOption_G1UsePrevMarking, " VerifyDuringGC:(before)"); } + g1h->check_bitmaps("Remark Start"); G1CollectorPolicy* g1p = g1h->g1_policy(); g1p->record_concurrent_mark_remark_start(); @@ -1348,6 +1361,7 @@ Universe::verify(VerifyOption_G1UseNextMarking, " VerifyDuringGC:(after)"); } + g1h->check_bitmaps("Remark End"); assert(!restart_for_overflow(), "sanity"); // Completely reset the marking state since marking completed set_non_marking_state(); @@ -1389,7 +1403,7 @@ void set_bit_for_region(HeapRegion* hr) { assert(!hr->continuesHumongous(), "should have filtered those out"); - BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); + BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); if (!hr->startsHumongous()) { // Normal (non-humongous) case: just set the bit. _region_bm->par_at_put(index, true); @@ -1577,7 +1591,7 @@ if (_verbose) { gclog_or_tty->print_cr("Region %u: marked bytes mismatch: " "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT, - hr->hrs_index(), exp_marked_bytes, act_marked_bytes); + hr->hrm_index(), exp_marked_bytes, act_marked_bytes); } failures += 1; } @@ -1586,7 +1600,7 @@ // (which was just calculated) region bit maps. // We're not OK if the bit in the calculated expected region // bitmap is set and the bit in the actual region bitmap is not. 
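The rule stated in this comment reduces to a subset check: every bit in the freshly computed expected bitmap must also be set in the recorded one, while extra recorded bits are tolerated. A tiny standalone illustration (std::vector<bool> standing in for BitMap):

    #include <vector>
    #include <cstddef>

    // Failure only when an expected bit is missing from the actual bitmap.
    static size_t count_mismatches(const std::vector<bool>& expected,
                                   const std::vector<bool>& actual) {
      size_t failures = 0;
      for (size_t i = 0; i < expected.size(); i++) {
        if (expected[i] && !actual[i]) failures++;
      }
      return failures;
    }

    int main() {
      std::vector<bool> expected, actual;
      expected.push_back(true);  actual.push_back(true);
      expected.push_back(false); actual.push_back(true); // extra bit: tolerated
      expected.push_back(true);  actual.push_back(true);
      return (int)count_mismatches(expected, actual); // 0 failures
    }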
- BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); + BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); bool expected = _exp_region_bm->at(index); bool actual = _region_bm->at(index); @@ -1594,7 +1608,7 @@ if (_verbose) { gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " "expected: %s, actual: %s", - hr->hrs_index(), + hr->hrm_index(), BOOL_TO_STR(expected), BOOL_TO_STR(actual)); } failures += 1; @@ -1615,7 +1629,7 @@ if (_verbose) { gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " "expected: %s, actual: %s", - hr->hrs_index(), i, + hr->hrm_index(), i, BOOL_TO_STR(expected), BOOL_TO_STR(actual)); } failures += 1; @@ -1997,6 +2011,7 @@ Universe::verify(VerifyOption_G1UsePrevMarking, " VerifyDuringGC:(before)"); } + g1h->check_bitmaps("Cleanup Start"); G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); g1p->record_concurrent_mark_cleanup_start(); @@ -2034,8 +2049,8 @@ // that calculated by walking the marking bitmap. // Bitmaps to hold expected values - BitMap expected_region_bm(_region_bm.size(), false); - BitMap expected_card_bm(_card_bm.size(), false); + BitMap expected_region_bm(_region_bm.size(), true); + BitMap expected_card_bm(_card_bm.size(), true); G1ParVerifyFinalCountTask g1_par_verify_task(g1h, &_region_bm, @@ -2137,22 +2152,31 @@ // Update the soft reference policy with the new heap occupancy. Universe::update_heap_info_at_gc(); - // We need to make this be a "collection" so any collection pause that - // races with it goes around and waits for completeCleanup to finish. - g1h->increment_total_collections(); - - // We reclaimed old regions so we should calculate the sizes to make - // sure we update the old gen/space data. - g1h->g1mm()->update_sizes(); - if (VerifyDuringGC) { HandleMark hm; // handle scope Universe::heap()->prepare_for_verify(); Universe::verify(VerifyOption_G1UsePrevMarking, " VerifyDuringGC:(after)"); } + g1h->check_bitmaps("Cleanup End"); g1h->verify_region_sets_optional(); + + // We need to make this be a "collection" so any collection pause that + // races with it goes around and waits for completeCleanup to finish. + g1h->increment_total_collections(); + + // Clean out dead classes and update Metaspace sizes. + if (ClassUnloadingWithConcurrentMark) { + ClassLoaderDataGraph::purge(); + } + MetaspaceGC::compute_new_size(); + + // We reclaimed old regions so we should calculate the sizes to make + // sure we update the old gen/space data. 
+ g1h->g1mm()->update_sizes(); + g1h->allocation_context_stats().update_after_mark(); + g1h->trace_heap_after_concurrent_cycle(); } @@ -2170,10 +2194,10 @@ _cleanup_list.length()); } - // Noone else should be accessing the _cleanup_list at this point, - // so it's not necessary to take any locks + // No one else should be accessing the _cleanup_list at this point, + // so it is not necessary to take any locks while (!_cleanup_list.is_empty()) { - HeapRegion* hr = _cleanup_list.remove_head(); + HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); assert(hr != NULL, "Got NULL from a non-empty list"); hr->par_clear(); tmp_free_list.add_ordered(hr); @@ -2382,6 +2406,8 @@ } virtual void work(uint worker_id) { + ResourceMark rm; + HandleMark hm; CMTask* task = _cm->task(worker_id); G1CMIsAliveClosure g1_is_alive(_g1h); G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); @@ -2439,6 +2465,26 @@ _g1h->set_par_threads(0); } +void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { + G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); +} + +// Helper class to get rid of some boilerplate code. +class G1RemarkGCTraceTime : public GCTraceTime { + static bool doit_and_prepend(bool doit) { + if (doit) { + gclog_or_tty->put(' '); + } + return doit; + } + + public: + G1RemarkGCTraceTime(const char* title, bool doit) + : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(), + G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) { + } +}; + void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { if (has_overflown()) { // Skip processing the discovered references if we have @@ -2464,7 +2510,7 @@ if (G1Log::finer()) { gclog_or_tty->put(' '); } - GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm()); + GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id()); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2521,7 +2567,8 @@ &g1_keep_alive, &g1_drain_mark_stack, executor, - g1h->gc_timer_cm()); + g1h->gc_timer_cm(), + concurrent_gc_id()); g1h->gc_tracer_cm()->report_gc_reference_stats(stats); // The do_oop work routines of the keep_alive and drain_marking_stack @@ -2550,9 +2597,41 @@ return; } - g1h->unlink_string_and_symbol_table(&g1_is_alive, - /* process_strings */ false, // currently strings are always roots - /* process_symbols */ true); + assert(_markStack.isEmpty(), "Marking should have completed"); + + // Unload Klasses, String, Symbols, Code Cache, etc. + { + G1RemarkGCTraceTime trace("Unloading", G1Log::finer()); + + if (ClassUnloadingWithConcurrentMark) { + // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack + // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase. + // Defer the cleaning until we have complete on_stack data. 
+ MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */); + + bool purged_classes; + + { + G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest()); + purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); + } + + { + G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest()); + weakRefsWorkParallelPart(&g1_is_alive, purged_classes); + } + + { + G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest()); + ClassLoaderDataGraph::free_deallocate_lists(); + } + } + + if (G1StringDedup::is_enabled()) { + G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest()); + G1StringDedup::unlink(&g1_is_alive); + } + } } void ConcurrentMark::swapMarkBitMaps() { @@ -2561,6 +2640,57 @@ _nextMarkBitMap = (CMBitMap*) temp; } +class CMObjectClosure; + +// Closure for iterating over objects, currently only used for +// processing SATB buffers. +class CMObjectClosure : public ObjectClosure { +private: + CMTask* _task; + +public: + void do_object(oop obj) { + _task->deal_with_reference(obj); + } + + CMObjectClosure(CMTask* task) : _task(task) { } +}; + +class G1RemarkThreadsClosure : public ThreadClosure { + CMObjectClosure _cm_obj; + G1CMOopClosure _cm_cl; + MarkingCodeBlobClosure _code_cl; + int _thread_parity; + bool _is_par; + + public: + G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) : + _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), + _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {} + + void do_thread(Thread* thread) { + if (thread->is_Java_thread()) { + if (thread->claim_oops_do(_is_par, _thread_parity)) { + JavaThread* jt = (JavaThread*)thread; + + // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; + // however, oops reachable from nmethods have very complex lifecycles: + // * Alive if on the stack of an executing method + // * Weakly reachable otherwise + // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be + // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
+ jt->nmethods_do(&_code_cl); + + jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj); + } + } else if (thread->is_VM_thread()) { + if (thread->claim_oops_do(_is_par, _thread_parity)) { + JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj); + } + } + } +}; + class CMRemarkTask: public AbstractGangTask { private: ConcurrentMark* _cm; @@ -2572,6 +2702,14 @@ if (worker_id < _cm->active_tasks()) { CMTask* task = _cm->task(worker_id); task->record_start_time(); + { + ResourceMark rm; + HandleMark hm; + + G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial); + Threads::threads_do(&threads_f); + } + do { task->do_marking_step(1000000000.0 /* something very large */, true /* do_termination */, @@ -2594,6 +2732,8 @@ HandleMark hm; G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer()); + g1h->ensure_parsability(false); if (G1CollectedHeap::use_parallel_gc_threads()) { @@ -2673,7 +2813,6 @@ str = " O"; } else { HeapRegion* hr = _g1h->heap_region_containing(obj); - guarantee(hr != NULL, "invariant"); bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); bool marked = _g1h->is_marked(obj, _vo); @@ -2814,11 +2953,6 @@ _nextMarkBitMap->clearRange(mr); } -void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { - clearRangePrevBitmap(mr); - clearRangeNextBitmap(mr); -} - HeapRegion* ConcurrentMark::claim_region(uint worker_id) { // "checkpoint" the finger @@ -2852,22 +2986,25 @@ // claim_region() and a humongous object allocation might force us // to do a bit of unnecessary work (due to some unnecessary bitmap // iterations) but it should not introduce and correctness issues. - HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); - HeapWord* bottom = curr_region->bottom(); - HeapWord* end = curr_region->end(); - HeapWord* limit = curr_region->next_top_at_mark_start(); - - if (verbose_low()) { - gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " - "["PTR_FORMAT", "PTR_FORMAT"), " - "limit = "PTR_FORMAT, - worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); - } + HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); + + // Above heap_region_containing_raw may return NULL as we always scan claim + // until the end of the heap. In this case, just jump to the next region. + HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; // Is the gap between reading the finger and doing the CAS too long? 
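The answer to the question above is that a long gap is harmless: a stale finger simply makes the cmpxchg on the next line fail, and the worker re-reads the finger and retries. Stripped of the NULL-region and humongous handling, the claiming protocol looks roughly like this (a sketch using C++11 atomics rather than HotSpot's Atomic::cmpxchg_ptr; names are illustrative):

    #include <atomic>
    #include <cstdint>

    const uintptr_t kRegionBytes = 1 << 20; // stand-in for HeapRegion::GrainBytes

    // Read the shared finger, compute the end of the region it points into,
    // and try to bump the finger there. Success claims [f, f + kRegionBytes);
    // failure means another worker moved the finger, so retry from its new value.
    static uintptr_t claim_region(std::atomic<uintptr_t>& finger, uintptr_t heap_end) {
      uintptr_t f = finger.load();
      while (f < heap_end) {
        if (finger.compare_exchange_strong(f, f + kRegionBytes)) {
          return f; // claimed
        }
        // compare_exchange_strong reloaded f on failure; loop and retry.
      }
      return heap_end; // out of regions
    }

    int main() {
      std::atomic<uintptr_t> finger(0);
      uintptr_t first = claim_region(finger, 4 * kRegionBytes);
      return (first == 0 && finger.load() == kRegionBytes) ? 0 : 1;
    }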
HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); - if (res == finger) { + if (res == finger && curr_region != NULL) { // we succeeded + HeapWord* bottom = curr_region->bottom(); + HeapWord* limit = curr_region->next_top_at_mark_start(); + + if (verbose_low()) { + gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " + "["PTR_FORMAT", "PTR_FORMAT"), " + "limit = "PTR_FORMAT, + worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); + } // notice that _finger == end cannot be guaranteed here since, // someone else might have moved the finger even further @@ -2898,10 +3035,17 @@ } else { assert(_finger > finger, "the finger should have moved forward"); if (verbose_low()) { - gclog_or_tty->print_cr("[%u] somebody else moved the finger, " - "global finger = "PTR_FORMAT", " - "our finger = "PTR_FORMAT, - worker_id, p2i(_finger), p2i(finger)); + if (curr_region == NULL) { + gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " + "global finger = "PTR_FORMAT", " + "our finger = "PTR_FORMAT, + worker_id, p2i(_finger), p2i(finger)); + } else { + gclog_or_tty->print_cr("[%u] somebody else moved the finger, " + "global finger = "PTR_FORMAT", " + "our finger = "PTR_FORMAT, + worker_id, p2i(_finger), p2i(finger)); + } } // read it again @@ -3016,8 +3160,10 @@ // happens, heap_region_containing() will return the bottom of the // corresponding starts humongous region and the check below will // not hold any more. + // Since we always iterate over all regions, we might get a NULL HeapRegion + // here. HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); - guarantee(global_finger == global_hr->bottom(), + guarantee(global_hr == NULL || global_finger == global_hr->bottom(), err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); } @@ -3030,7 +3176,7 @@ if (task_finger != NULL && task_finger < _heap_end) { // See above note on the global finger verification. HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); - guarantee(task_finger == task_hr->bottom() || + guarantee(task_hr == NULL || task_finger == task_hr->bottom() || !task_hr->in_collection_set(), err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); @@ -3109,7 +3255,7 @@ assert(limit_idx <= end_idx, "or else use atomics"); // Aggregate the "stripe" in the count data associated with hr. - uint hrs_index = hr->hrs_index(); + uint hrm_index = hr->hrm_index(); size_t marked_bytes = 0; for (uint i = 0; i < _max_worker_id; i += 1) { @@ -3118,7 +3264,7 @@ // Fetch the marked_bytes in this region for task i and // add it to the running total for this region. - marked_bytes += marked_bytes_array[hrs_index]; + marked_bytes += marked_bytes_array[hrm_index]; // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) // into the global card bitmap. @@ -3241,8 +3387,14 @@ // abandon current marking iteration due to a Full GC void ConcurrentMark::abort() { - // Clear all marks to force marking thread to do nothing + // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next + // concurrent bitmap clearing. _nextMarkBitMap->clearAll(); + + // Note we cannot clear the previous marking bitmap here + // since VerifyDuringGC verifies the objects marked during + // a full GC against the previous bitmap. 
+ // Clear the liveness counting data clear_all_count_data(); // Empty mark stack @@ -3252,6 +3404,12 @@ } _first_overflow_barrier_sync.abort(); _second_overflow_barrier_sync.abort(); + const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); + if (!gc_id.is_undefined()) { + // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance + // to detect that it was aborted. Only keep track of the first GC id that we aborted. + _aborted_gc_id = gc_id; + } _has_aborted = true; SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); @@ -3266,6 +3424,13 @@ _g1h->register_concurrent_cycle_end(); } +const GCId& ConcurrentMark::concurrent_gc_id() { + if (has_aborted()) { + return _aborted_gc_id; + } + return _g1h->gc_tracer_cm()->gc_id(); +} + static void print_ms_time_info(const char* prefix, const char* name, NumberSeq& ns) { gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", @@ -3322,32 +3487,17 @@ // We take a break if someone is trying to stop the world. bool ConcurrentMark::do_yield_check(uint worker_id) { - if (should_yield()) { + if (SuspendibleThreadSet::should_yield()) { if (worker_id == 0) { _g1h->g1_policy()->record_concurrent_pause(); } - cmThread()->yield(); + SuspendibleThreadSet::yield(); return true; } else { return false; } } -bool ConcurrentMark::should_yield() { - return cmThread()->should_yield(); -} - -bool ConcurrentMark::containing_card_is_marked(void* p) { - size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); - return _card_bm.at(offset >> CardTableModRefBS::card_shift); -} - -bool ConcurrentMark::containing_cards_are_marked(void* start, - void* last) { - return containing_card_is_marked(start) && - containing_card_is_marked(last); -} - #ifndef PRODUCT // for debugging purposes void ConcurrentMark::print_finger() { @@ -3410,20 +3560,6 @@ } }; -// Closure for iterating over objects, currently only used for -// processing SATB buffers. -class CMObjectClosure : public ObjectClosure { -private: - CMTask* _task; - -public: - void do_object(oop obj) { - _task->deal_with_reference(obj); - } - - CMObjectClosure(CMTask* task) : _task(task) { } -}; - G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task) @@ -3437,9 +3573,8 @@ } void CMTask::setup_for_region(HeapRegion* hr) { - // Separated the asserts so that we know which one fires. assert(hr != NULL, - "claim_region() should have filtered out continues humongous regions"); + "claim_region() should have filtered out NULL regions"); assert(!hr->continuesHumongous(), "claim_region() should have filtered out continues humongous regions"); @@ -3615,7 +3750,7 @@ if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " - "scanned = %d%s, refs reached = %d%s", + "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", _worker_id, last_interval_ms, _words_scanned, (_words_scanned >= _words_scanned_limit) ? " (*)" : "", @@ -3625,7 +3760,7 @@ #endif // _MARKING_STATS_ // (4) We check whether we should yield. If we have to, then we abort. - if (_cm->should_yield()) { + if (SuspendibleThreadSet::should_yield()) { // We should yield. To do this we abort the task. The caller is // responsible for yielding. set_has_aborted(); @@ -3889,15 +4024,6 @@ } } - if (!concurrent() && !has_aborted()) { - // We should only do this during remark. 
- if (G1CollectedHeap::use_parallel_gc_threads()) { - satb_mq_set.par_iterate_closure_all_threads(_worker_id); - } else { - satb_mq_set.iterate_closure_all_threads(); - } - } - _draining_satb_buffers = false; assert(has_aborted() || @@ -4555,7 +4681,6 @@ _hum_prev_live_bytes(0), _hum_next_live_bytes(0), _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - MemRegion g1_committed = g1h->g1_committed(); MemRegion g1_reserved = g1h->g1_reserved(); double now = os::elapsedTime(); @@ -4563,10 +4688,8 @@ _out->cr(); _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" - G1PPRL_SUM_ADDR_FORMAT("committed") G1PPRL_SUM_ADDR_FORMAT("reserved") G1PPRL_SUM_BYTE_FORMAT("region-size"), - p2i(g1_committed.start()), p2i(g1_committed.end()), p2i(g1_reserved.start()), p2i(g1_reserved.end()), HeapRegion::GrainBytes); _out->print_cr(G1PPRL_LINE_PREFIX); @@ -4627,7 +4750,7 @@ } bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { - const char* type = ""; + const char* type = r->get_type_str(); HeapWord* bottom = r->bottom(); HeapWord* end = r->end(); size_t capacity_bytes = r->capacity(); @@ -4638,15 +4761,7 @@ size_t remset_bytes = r->rem_set()->mem_size(); size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); - if (r->used() == 0) { - type = "FREE"; - } else if (r->is_survivor()) { - type = "SURV"; - } else if (r->is_young()) { - type = "EDEN"; - } else if (r->startsHumongous()) { - type = "HUMS"; - + if (r->startsHumongous()) { assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, "they should have been zeroed after the last time we used them"); @@ -4659,12 +4774,9 @@ &prev_live_bytes, &next_live_bytes); end = bottom + HeapRegion::GrainWords; } else if (r->continuesHumongous()) { - type = "HUMC"; get_hum_bytes(&used_bytes, &capacity_bytes, &prev_live_bytes, &next_live_bytes); assert(end == bottom + HeapRegion::GrainWords, "invariant"); - } else { - type = "OLD"; } _total_used_bytes += used_bytes; --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,10 +25,14 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP +#include "classfile/javaClasses.hpp" #include "gc_implementation/g1/heapRegionSet.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "utilities/taskqueue.hpp" class G1CollectedHeap; +class CMBitMap; class CMTask; typedef GenericTaskQueue CMTaskQueue; typedef GenericTaskQueueSet CMTaskQueueSet; @@ -55,7 +59,6 @@ HeapWord* _bmStartWord; // base address of range covered by map size_t _bmWordSize; // map size (in #HeapWords covered) const int _shifter; // map to char or bit - VirtualSpace _virtual_space; // underlying the bit map BitMap _bm; // the bit map itself public: @@ -85,19 +88,19 @@ // Return the address corresponding to the next marked bit at or after // "addr", and before "limit", if "limit" is non-NULL. If there is no // such bit, returns "limit" if that is non-NULL, or else "endWord()". 
- HeapWord* getNextMarkedWordAddress(HeapWord* addr, - HeapWord* limit = NULL) const; + HeapWord* getNextMarkedWordAddress(const HeapWord* addr, + const HeapWord* limit = NULL) const; // Return the address corresponding to the next unmarked bit at or after // "addr", and before "limit", if "limit" is non-NULL. If there is no // such bit, returns "limit" if that is non-NULL, or else "endWord()". - HeapWord* getNextUnmarkedWordAddress(HeapWord* addr, - HeapWord* limit = NULL) const; + HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr, + const HeapWord* limit = NULL) const; // conversion utilities HeapWord* offsetToHeapWord(size_t offset) const { return _bmStartWord + (offset << _shifter); } - size_t heapWordToOffset(HeapWord* addr) const { + size_t heapWordToOffset(const HeapWord* addr) const { return pointer_delta(addr, _bmStartWord) >> _shifter; } int heapWordDiffToOffsetDiff(size_t diff) const; @@ -113,42 +116,41 @@ void print_on_error(outputStream* st, const char* prefix) const; // debugging - NOT_PRODUCT(bool covers(ReservedSpace rs) const;) + NOT_PRODUCT(bool covers(MemRegion rs) const;) +}; + +class CMBitMapMappingChangedListener : public G1MappingChangedListener { + private: + CMBitMap* _bm; + public: + CMBitMapMappingChangedListener() : _bm(NULL) {} + + void set_bitmap(CMBitMap* bm) { _bm = bm; } + + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); }; class CMBitMap : public CMBitMapRO { + private: + CMBitMapMappingChangedListener _listener; public: - // constructor - CMBitMap(int shifter) : - CMBitMapRO(shifter) {} + static size_t compute_size(size_t heap_size); + // Returns the amount of bytes on the heap between two marks in the bitmap. + static size_t mark_distance(); - // Allocates the back store for the marking bitmap - bool allocate(ReservedSpace heap_rs); + CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); } - // write marks - void mark(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - _bm.set_bit(heapWordToOffset(addr)); - } - void clear(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - _bm.clear_bit(heapWordToOffset(addr)); - } - bool parMark(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - return _bm.par_set_bit(heapWordToOffset(addr)); - } - bool parClear(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - return _bm.par_clear_bit(heapWordToOffset(addr)); - } + // Initializes the underlying BitMap to cover the given area. + void initialize(MemRegion heap, G1RegionToSpaceMapper* storage); + + // Write marks. + inline void mark(HeapWord* addr); + inline void clear(HeapWord* addr); + inline bool parMark(HeapWord* addr); + inline bool parClear(HeapWord* addr); + void markRange(MemRegion mr); - void clearAll(); void clearRange(MemRegion mr); // Starting at the bit corresponding to "addr" (inclusive), find the next @@ -159,6 +161,9 @@ // the run. If there is no "1" bit at or after "addr", return an empty // MemRegion. MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr); + + // Clear the whole mark bitmap. + void clearAll(); }; // Represents a marking stack used by ConcurrentMarking in the G1 collector. 
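The compute_size() and mark_distance() functions introduced above fix the space cost of a marking bitmap at one bit per MinObjAlignment-sized unit of heap. With the common 8-byte object alignment, one bitmap byte covers 8 * 8 = 64 heap bytes, so each bitmap is heap_size / 64. A quick arithmetic check (assuming that 8-byte alignment; not HotSpot code):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t min_obj_alignment_bytes = 8; // typical 64-bit default
      const size_t bits_per_byte = 8;
      const size_t mark_distance = min_obj_alignment_bytes * bits_per_byte; // 64
      const size_t heap_size = 1024UL * 1024UL * 1024UL; // a 1 GB heap
      // 16777216 bytes = 16 MB per bitmap; G1 keeps two (prev and next).
      printf("bitmap bytes: %lu\n", (unsigned long)(heap_size / mark_distance));
      return 0;
    }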
@@ -444,6 +449,7 @@ volatile bool _concurrent; // set at the end of a Full GC so that marking aborts volatile bool _has_aborted; + GCId _aborted_gc_id; // used when remark aborts due to an overflow to indicate that // another concurrent marking phase should start @@ -474,6 +480,7 @@ ForceOverflowSettings _force_overflow_conc; ForceOverflowSettings _force_overflow_stw; + void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes); void weakRefsWork(bool clear_all_soft_refs); void swapMarkBitMaps(); @@ -676,7 +683,9 @@ return _task_queues->steal(worker_id, hash_seed, obj); } - ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs); + ConcurrentMark(G1CollectedHeap* g1h, + G1RegionToSpaceMapper* prev_bitmap_storage, + G1RegionToSpaceMapper* next_bitmap_storage); ~ConcurrentMark(); ConcurrentMarkThread* cmThread() { return _cmThread; } @@ -705,8 +714,10 @@ // inconsistent) and always passing the size. hr is the region that // contains the object and it's passed optionally from callers who // might already have it (no point in recalculating it). - inline void grayRoot(oop obj, size_t word_size, - uint worker_id, HeapRegion* hr = NULL); + inline void grayRoot(oop obj, + size_t word_size, + uint worker_id, + HeapRegion* hr = NULL); // It iterates over the heap and for each object it comes across it // will dump the contents of its reference fields, as well as @@ -727,11 +738,16 @@ // AND MARKED : indicates that an object is both explicitly and // implicitly live (it should be one or the other, not both) void print_reachable(const char* str, - VerifyOption vo, bool all) PRODUCT_RETURN; + VerifyOption vo, + bool all) PRODUCT_RETURN; // Clear the next marking bitmap (will be called concurrently). void clearNextBitmap(); + // Return whether the next mark bitmap has no marks set. To be used for assertions + // only. Will not yield to pause requests. + bool nextMarkBitmapIsClear(); + // These two do the work that needs to be done before and after the // initial root checkpoint. Since this checkpoint can be done at two // different points (i.e. an explicit pause or piggy-backed on a @@ -760,12 +776,11 @@ // this carefully! inline void markPrev(oop p); - // Clears marks for all objects in the given range, for the prev, - // next, or both bitmaps. NB: the previous bitmap is usually + // Clears marks for all objects in the given range, for the prev or + // next bitmaps. NB: the previous bitmap is usually // read-only, so use this carefully! void clearRangePrevBitmap(MemRegion mr); void clearRangeNextBitmap(MemRegion mr); - void clearRangeBothBitmaps(MemRegion mr); // Notify data structures that a GC has started. void note_start_of_gc() { @@ -787,27 +802,6 @@ bool verify_thread_buffers, bool verify_fingers) PRODUCT_RETURN; - // It is called at the end of an evacuation pause during marking so - // that CM is notified of where the new end of the heap is. It - // doesn't do anything if concurrent_marking_in_progress() is false, - // unless the force parameter is true. 
- void update_g1_committed(bool force = false); - - bool isMarked(oop p) const { - assert(p != NULL && p->is_oop(), "expected an oop"); - HeapWord* addr = (HeapWord*)p; - assert(addr >= _nextMarkBitMap->startWord() || - addr < _nextMarkBitMap->endWord(), "in a region"); - - return _nextMarkBitMap->isMarked(addr); - } - - inline bool not_yet_marked(oop p) const; - - // XXX Debug code - bool containing_card_is_marked(void* p); - bool containing_cards_are_marked(void* start, void* last); - bool isPrevMarked(oop p) const { assert(p != NULL && p->is_oop(), "expected an oop"); HeapWord* addr = (HeapWord*)p; @@ -818,13 +812,14 @@ } inline bool do_yield_check(uint worker_i = 0); - inline bool should_yield(); // Called to abort the marking cycle after a Full GC takes palce. void abort(); bool has_aborted() { return _has_aborted; } + const GCId& concurrent_gc_id(); + // This prints the global/local fingers. It is used for debugging. NOT_PRODUCT(void print_finger();) @@ -892,7 +887,8 @@ // marked_bytes array slot for the given HeapRegion. // Sets the bits in the given card bitmap that are associated with the // cards that are spanned by the memory region. - inline void count_region(MemRegion mr, HeapRegion* hr, + inline void count_region(MemRegion mr, + HeapRegion* hr, size_t* marked_bytes_array, BitMap* task_card_bm); @@ -900,56 +896,27 @@ // data structures for the given worker id. inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id); - // Counts the given memory region in the task/worker counting - // data structures for the given worker id. - inline void count_region(MemRegion mr, uint worker_id); - // Counts the given object in the given task/worker counting // data structures. - inline void count_object(oop obj, HeapRegion* hr, + inline void count_object(oop obj, + HeapRegion* hr, size_t* marked_bytes_array, BitMap* task_card_bm); - // Counts the given object in the task/worker counting data - // structures for the given worker id. - inline void count_object(oop obj, HeapRegion* hr, uint worker_id); - // Attempts to mark the given object and, if successful, counts // the object in the given task/worker counting structures. - inline bool par_mark_and_count(oop obj, HeapRegion* hr, + inline bool par_mark_and_count(oop obj, + HeapRegion* hr, size_t* marked_bytes_array, BitMap* task_card_bm); // Attempts to mark the given object and, if successful, counts // the object in the task/worker counting structures for the // given worker id. - inline bool par_mark_and_count(oop obj, size_t word_size, - HeapRegion* hr, uint worker_id); - - // Attempts to mark the given object and, if successful, counts - // the object in the task/worker counting structures for the - // given worker id. - inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id); - - // Similar to the above routine but we don't know the heap region that - // contains the object to be marked/counted, which this routine looks up. - inline bool par_mark_and_count(oop obj, uint worker_id); - - // Similar to the above routine but there are times when we cannot - // safely calculate the size of obj due to races and we, therefore, - // pass the size in as a parameter. It is the caller's reponsibility - // to ensure that the size passed in for obj is valid. - inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id); - - // Unconditionally mark the given object, and unconditinally count - // the object in the counting structures for worker id 0. - // Should *not* be called from parallel code. 
- inline bool mark_and_count(oop obj, HeapRegion* hr); - - // Similar to the above routine but we don't know the heap region that - // contains the object to be marked/counted, which this routine looks up. - // Should *not* be called from parallel code. - inline bool mark_and_count(oop obj); + inline bool par_mark_and_count(oop obj, + size_t word_size, + HeapRegion* hr, + uint worker_id); // Returns true if initialization was successfully completed. bool completed_initialization() const { @@ -1221,9 +1188,12 @@ _finger = new_finger; } - CMTask(uint worker_id, ConcurrentMark *cm, - size_t* marked_bytes, BitMap* card_bm, - CMTaskQueue* task_queue, CMTaskQueueSet* task_queues); + CMTask(uint worker_id, + ConcurrentMark *cm, + size_t* marked_bytes, + BitMap* card_bm, + CMTaskQueue* task_queue, + CMTaskQueueSet* task_queues); // it prints statistics associated with this task void print_stats(); --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -86,7 +86,7 @@ HeapWord* start = mr.start(); HeapWord* end = mr.end(); size_t region_size_bytes = mr.byte_size(); - uint index = hr->hrs_index(); + uint index = hr->hrm_index(); assert(!hr->continuesHumongous(), "should not be HC region"); assert(hr == g1h->heap_region_containing(start), "sanity"); @@ -125,14 +125,6 @@ count_region(mr, hr, marked_bytes_array, task_card_bm); } -// Counts the given memory region, which may be a single object, in the -// task/worker counting data structures for the given worker id. -inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) { - HeapWord* addr = mr.start(); - HeapRegion* hr = _g1h->heap_region_containing_raw(addr); - count_region(mr, hr, worker_id); -} - // Counts the given object in the given task/worker counting data structures. inline void ConcurrentMark::count_object(oop obj, HeapRegion* hr, @@ -142,17 +134,6 @@ count_region(mr, hr, marked_bytes_array, task_card_bm); } -// Counts the given object in the task/worker counting data -// structures for the given worker id. -inline void ConcurrentMark::count_object(oop obj, - HeapRegion* hr, - uint worker_id) { - size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id); - BitMap* task_card_bm = count_card_bitmap_for(worker_id); - HeapWord* addr = (HeapWord*) obj; - count_object(obj, hr, marked_bytes_array, task_card_bm); -} - // Attempts to mark the given object and, if successful, counts // the object in the given task/worker counting structures. inline bool ConcurrentMark::par_mark_and_count(oop obj, @@ -184,63 +165,6 @@ return false; } -// Attempts to mark the given object and, if successful, counts -// the object in the task/worker counting structures for the -// given worker id. -inline bool ConcurrentMark::par_mark_and_count(oop obj, - HeapRegion* hr, - uint worker_id) { - HeapWord* addr = (HeapWord*)obj; - if (_nextMarkBitMap->parMark(addr)) { - // Update the task specific count data for the object. - count_object(obj, hr, worker_id); - return true; - } - return false; -} - -// As above - but we don't know the heap region containing the -// object and so have to supply it. 
-inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) { - HeapWord* addr = (HeapWord*)obj; - HeapRegion* hr = _g1h->heap_region_containing_raw(addr); - return par_mark_and_count(obj, hr, worker_id); -} - -// Similar to the above routine but we already know the size, in words, of -// the object that we wish to mark/count -inline bool ConcurrentMark::par_mark_and_count(oop obj, - size_t word_size, - uint worker_id) { - HeapWord* addr = (HeapWord*)obj; - if (_nextMarkBitMap->parMark(addr)) { - // Update the task specific count data for the object. - MemRegion mr(addr, word_size); - count_region(mr, worker_id); - return true; - } - return false; -} - -// Unconditionally mark the given object, and unconditionally count -// the object in the counting structures for worker id 0. -// Should *not* be called from parallel code. -inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) { - HeapWord* addr = (HeapWord*)obj; - _nextMarkBitMap->mark(addr); - // Update the task specific count data for the object. - count_object(obj, hr, 0 /* worker_id */); - return true; -} - -// As above - but we don't have the heap region containing the -// object, so we have to supply it. -inline bool ConcurrentMark::mark_and_count(oop obj) { - HeapWord* addr = (HeapWord*)obj; - HeapRegion* hr = _g1h->heap_region_containing_raw(addr); - return mark_and_count(obj, hr); -} - inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { HeapWord* start_addr = MAX2(startWord(), mr.start()); HeapWord* end_addr = MIN2(endWord(), mr.end()); @@ -268,6 +192,36 @@ return iterate(cl, mr); } +#define check_mark(addr) \ + assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \ + "outside underlying space?"); \ + assert(G1CollectedHeap::heap()->is_in_exact(addr), \ + err_msg("Trying to access not available bitmap "PTR_FORMAT \ + " corresponding to "PTR_FORMAT" (%u)", \ + p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr))); + +inline void CMBitMap::mark(HeapWord* addr) { + check_mark(addr); + _bm.set_bit(heapWordToOffset(addr)); +} + +inline void CMBitMap::clear(HeapWord* addr) { + check_mark(addr); + _bm.clear_bit(heapWordToOffset(addr)); +} + +inline bool CMBitMap::parMark(HeapWord* addr) { + check_mark(addr); + return _bm.par_set_bit(heapWordToOffset(addr)); +} + +inline bool CMBitMap::parClear(HeapWord* addr) { + check_mark(addr); + return _bm.par_clear_bit(heapWordToOffset(addr)); +} + +#undef check_mark + inline void CMTask::push(oop obj) { HeapWord* objAddr = (HeapWord*) obj; assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,4 +1,4 @@ -/* + /* * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
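Before moving into concurrentMarkThread.cpp, one note on the check_mark macro introduced above: the same two bounds assertions are expanded into every bitmap mutator, and the macro is #undef'd immediately afterwards so the name cannot leak past this block. A compact sketch of the same pattern, with hypothetical names and a byte-granular bitmap standing in for HotSpot's BitMap:

#include <cassert>
#include <cstddef>

// Centralize the range check, expand it in each accessor, then #undef.
#define CHECK_RANGE(bit, limit) \
  assert((bit) < (limit) && "bitmap index outside underlying space")

class SmallBitmap {
  unsigned char* _bytes;  // caller-provided backing storage
  size_t _bit_limit;
public:
  SmallBitmap(unsigned char* bytes, size_t bit_limit)
    : _bytes(bytes), _bit_limit(bit_limit) {}
  void mark(size_t bit) {
    CHECK_RANGE(bit, _bit_limit);
    _bytes[bit / 8] |= (unsigned char)(1u << (bit % 8));
  }
  void clear(size_t bit) {
    CHECK_RANGE(bit, _bit_limit);
    _bytes[bit / 8] &= (unsigned char)~(1u << (bit % 8));
  }
};

#undef CHECK_RANGE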
* @@ -29,6 +29,7 @@ #include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/resourceArea.hpp" #include "runtime/vmThread.hpp" @@ -109,8 +110,7 @@ double scan_start = os::elapsedTime(); if (!cm()->has_aborted()) { if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]"); } @@ -118,8 +118,7 @@ double scan_end = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", scan_end - scan_start); } @@ -127,8 +126,7 @@ double mark_start_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-start]"); } @@ -151,8 +149,7 @@ } if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec); } @@ -167,8 +164,7 @@ "in remark (restart #%d).", iter); } if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]"); } } @@ -194,9 +190,8 @@ } else { // We don't want to update the marking status if a GC pause // is already underway. - _sts.join(); + SuspendibleThreadSetJoiner sts; g1h->set_marking_complete(); - _sts.leave(); } // Check if cleanup set the free_regions_coming flag. If it @@ -212,8 +207,7 @@ double cleanup_start_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-cleanup-start]"); } @@ -233,8 +227,7 @@ double cleanup_end_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec); } @@ -266,46 +259,47 @@ // record_concurrent_mark_cleanup_completed() (and, in fact, it's // not needed any more as the concurrent mark state has been // already reset). - _sts.join(); - if (!cm()->has_aborted()) { - g1_policy->record_concurrent_mark_cleanup_completed(); + { + SuspendibleThreadSetJoiner sts; + if (!cm()->has_aborted()) { + g1_policy->record_concurrent_mark_cleanup_completed(); + } } - _sts.leave(); if (cm()->has_aborted()) { if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-abort]"); } } // We now want to allow clearing of the marking bitmap to be // suspended by a collection pause. - _sts.join(); - _cm->clearNextBitmap(); - _sts.leave(); + // We may have aborted just before the remark. 
Do not bother clearing the + // bitmap then, as it has been done during mark abort. + if (!cm()->has_aborted()) { + SuspendibleThreadSetJoiner sts; + _cm->clearNextBitmap(); + } else { + assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear"); + } } // Update the number of full collections that have been // completed. This will also notify the FullGCCount_lock in case a // Java thread is waiting for a full GC to happen (e.g., it // called System.gc() with +ExplicitGCInvokesConcurrent). - _sts.join(); - g1h->increment_old_marking_cycles_completed(true /* concurrent */); - g1h->register_concurrent_cycle_end(); - _sts.leave(); + { + SuspendibleThreadSetJoiner sts; + g1h->increment_old_marking_cycles_completed(true /* concurrent */); + g1h->register_concurrent_cycle_end(); + } } assert(_should_terminate, "just checking"); terminate(); } - -void ConcurrentMarkThread::yield() { - _sts.yield("Concurrent Mark"); -} - void ConcurrentMarkThread::stop() { { MutexLockerEx ml(Terminator_lock); --- ./hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -89,9 +89,6 @@ // that started() is set and set in_progress(). bool during_cycle() { return started() || in_progress(); } - // Yield for GC - void yield(); - // shutdown void stop(); }; --- ./hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -70,7 +70,7 @@ DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) : PtrQueueSet(notify_when_complete), - _closure(NULL), + _mut_process_closure(NULL), _shared_dirty_card_queue(this, true /*perm*/), _free_ids(NULL), _processed_buffers_mut(0), _processed_buffers_rs_thread(0) @@ -83,10 +83,11 @@ return (uint)os::processor_count(); } -void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock, +void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock, int process_completed_threshold, int max_completed_queue, Mutex* lock, PtrQueueSet* fl_owner) { + _mut_process_closure = cl; PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, max_completed_queue, fl_owner); set_buffer_size(G1UpdateBufferSize); @@ -98,18 +99,15 @@ t->dirty_card_queue().handle_zero_index(); } -void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) { - _closure = closure; -} - -void DirtyCardQueueSet::iterate_closure_all_threads(bool consume, +void DirtyCardQueueSet::iterate_closure_all_threads(CardTableEntryClosure* cl, + bool consume, uint worker_i) { assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); for(JavaThread* t = Threads::first(); t; t = t->next()) { - bool b = t->dirty_card_queue().apply_closure(_closure, consume); + bool b = t->dirty_card_queue().apply_closure(cl, consume); guarantee(b, "Should not be interrupted."); } - bool b = shared_dirty_card_queue()->apply_closure(_closure, + bool b = shared_dirty_card_queue()->apply_closure(cl, consume, worker_i); guarantee(b, "Should not be interrupted."); @@ -143,7 +141,7 @@ bool b = false; if (worker_i != UINT_MAX) { - b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0, + b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0, _sz, true, worker_i); if (b) Atomic::inc(&_processed_buffers_mut); @@ -218,18 +216,11 @@ return res; } -bool 
DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i, - int stop_at, - bool during_pause) { - return apply_closure_to_completed_buffer(_closure, worker_i, - stop_at, during_pause); -} - -void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() { +void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) { BufferNode* nd = _completed_buffers_head; while (nd != NULL) { bool b = - DirtyCardQueue::apply_closure_to_buffer(_closure, + DirtyCardQueue::apply_closure_to_buffer(cl, BufferNode::make_buffer_from_node(nd), 0, _sz, false); guarantee(b, "Should not stop early."); @@ -237,6 +228,24 @@ } } +void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) { + BufferNode* nd = _cur_par_buffer_node; + while (nd != NULL) { + BufferNode* next = (BufferNode*)nd->next(); + BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd); + if (actual == nd) { + bool b = + DirtyCardQueue::apply_closure_to_buffer(cl, + BufferNode::make_buffer_from_node(actual), + 0, _sz, false); + guarantee(b, "Should not stop early."); + nd = next; + } else { + nd = actual; + } + } +} + // Deallocates any completed log buffers void DirtyCardQueueSet::clear() { BufferNode* buffers_to_delete = NULL; --- ./hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -80,7 +80,8 @@ class DirtyCardQueueSet: public PtrQueueSet { - CardTableEntryClosure* _closure; + // The closure used in mut_process_buffer(). + CardTableEntryClosure* _mut_process_closure; DirtyCardQueue _shared_dirty_card_queue; @@ -95,10 +96,12 @@ jint _processed_buffers_mut; jint _processed_buffers_rs_thread; + // Current buffer node used for parallel iteration. + BufferNode* volatile _cur_par_buffer_node; public: DirtyCardQueueSet(bool notify_when_complete = true); - void initialize(Monitor* cbl_mon, Mutex* fl_lock, + void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock, int process_completed_threshold, int max_completed_queue, Mutex* lock, PtrQueueSet* fl_owner = NULL); @@ -109,33 +112,15 @@ static void handle_zero_index_for_thread(JavaThread* t); - // Register "blk" as "the closure" for all queues. Only one such closure - // is allowed. The "apply_closure_to_completed_buffer" method will apply - // this closure to a completed buffer, and "iterate_closure_all_threads" - // applies it to partially-filled buffers (the latter should only be done - // with the world stopped). - void set_closure(CardTableEntryClosure* closure); - - // If there is a registered closure for buffers, apply it to all entries - // in all currently-active buffers. This should only be applied at a - // safepoint. (Currently must not be called in parallel; this should - // change in the future.) If "consume" is true, processed entries are - // discarded. - void iterate_closure_all_threads(bool consume = true, + // Apply the given closure to all entries in all currently-active buffers. + // This should only be applied at a safepoint. (Currently must not be called + // in parallel; this should change in the future.) If "consume" is true, + // processed entries are discarded. 
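The par_apply_closure_to_all_completed_buffers definition above is a textbook lock-free claiming loop: every worker reads the shared cursor, compare-and-swaps it from the node it read to that node's successor, and only the CAS winner processes the node; a loser simply continues from whatever value it observed. A reduced sketch of the same loop using std::atomic (illustrative types, not the HotSpot BufferNode API):

#include <atomic>
#include <cstddef>

struct Node { Node* next; /* payload elided */ };

class ParallelWorkList {
  std::atomic<Node*> _cursor; // re-seeded to the list head before each pass
public:
  explicit ParallelWorkList(Node* head) : _cursor(head) {}
  template <typename Fn>
  void par_iterate(Fn process) {
    Node* nd = _cursor.load();
    while (nd != NULL) {
      Node* next = nd->next;
      // On failure, compare_exchange_strong reloads nd with the value
      // another worker installed, so this thread retries from there.
      if (_cursor.compare_exchange_strong(nd, next)) {
        process(nd); // this thread claimed nd
        nd = next;
      }
    }
  }
};

reset_for_par_iteration, declared in the header hunk that continues below, plays exactly the role of re-seeding the shared cursor before each parallel pass; with that model in mind, the iterate_closure_all_threads declaration the comment above describes now takes its closure explicitly: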
+ void iterate_closure_all_threads(CardTableEntryClosure* cl, + bool consume = true, uint worker_i = 0); // If there exists some completed buffer, pop it, then apply the - // registered closure to all its elements, nulling out those elements - // processed. If all elements are processed, returns "true". If no - // completed buffers exist, returns false. If a completed buffer exists, - // but is only partially completed before a "yield" happens, the - // partially completed buffer (with its processed elements set to NULL) - // is returned to the completed buffer set, and this call returns false. - bool apply_closure_to_completed_buffer(uint worker_i = 0, - int stop_at = 0, - bool during_pause = false); - - // If there exists some completed buffer, pop it, then apply the // specified closure to all its elements, nulling out those elements // processed. If all elements are processed, returns "true". If no // completed buffers exist, returns false. If a completed buffer exists, @@ -156,7 +141,12 @@ // Applies the current closure to all completed buffers, // non-consumptively. - void apply_closure_to_all_completed_buffers(); + void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); + + void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; } + // Applies the current closure to all completed buffers, non-consumptively. + // Parallel version. + void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); DirtyCardQueue* shared_dirty_card_queue() { return &_shared_dirty_card_queue; --- ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "runtime/orderAccess.inline.hpp" G1CollectedHeap* G1AllocRegion::_g1h = NULL; HeapRegion* G1AllocRegion::_dummy_region = NULL; @@ -128,8 +129,7 @@ // Note that we first perform the allocation and then we store the // region in _alloc_region. This is the reason why an active region // can never be empty. - _alloc_region = new_alloc_region; - _count += 1; + update_alloc_region(new_alloc_region); trace("region allocation successful"); return result; } else { @@ -171,6 +171,19 @@ trace("set"); } +void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) { + trace("update"); + // We explicitly check that the region is not empty to make sure we + // maintain the "the alloc region cannot be empty" invariant. 
+ assert(alloc_region != NULL && !alloc_region->is_empty(), + ar_ext_msg(this, "pre-condition")); + + _alloc_region = alloc_region; + _alloc_region->set_allocation_context(allocation_context()); + _count += 1; + trace("updated"); +} + HeapRegion* G1AllocRegion::release() { trace("releasing"); HeapRegion* alloc_region = _alloc_region; @@ -224,5 +237,70 @@ G1AllocRegion::G1AllocRegion(const char* name, bool bot_updates) : _name(name), _bot_updates(bot_updates), - _alloc_region(NULL), _count(0), _used_bytes_before(0) { } + _alloc_region(NULL), _count(0), _used_bytes_before(0), + _allocation_context(AllocationContext::system()) { } + +HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, + bool force) { + return _g1h->new_mutator_alloc_region(word_size, force); +} + +void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); +} + +HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size, + bool force) { + assert(!force, "not supported for GC alloc regions"); + return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived); +} + +void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, + GCAllocForSurvived); +} + +HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size, + bool force) { + assert(!force, "not supported for GC alloc regions"); + return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured); +} + +void OldGCAllocRegion::retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, + GCAllocForTenured); +} + +HeapRegion* OldGCAllocRegion::release() { + HeapRegion* cur = get(); + if (cur != NULL) { + // Determine how far we are from the next card boundary. If it is smaller than + // the minimum object size we can allocate into, expand into the next card. + HeapWord* top = cur->top(); + HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes); + + size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize); + + if (to_allocate_words != 0) { + // We are not at a card boundary. Fill up, possibly into the next, taking the + // end of the region and the minimum object size into account. + to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize), + MAX2(to_allocate_words, G1CollectedHeap::min_fill_size())); + + // Skip allocation if there is not enough space to allocate even the smallest + // possible object. In this case this region will not be retained, so the + // original problem cannot occur. + if (to_allocate_words >= G1CollectedHeap::min_fill_size()) { + HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */); + CollectedHeap::fill_with_object(dummy, to_allocate_words); + } + } + } + return G1AllocRegion::release(); +} + + --- ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -57,6 +57,9 @@ // correct use of init() and release()). HeapRegion* volatile _alloc_region; + // Allocation context associated with this alloc region. 
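The card-boundary arithmetic in OldGCAllocRegion::release above is worth spelling out: pad the retained region up to the next card with a dummy object, but never with less than the minimum fill size, and never past the end of the region. A sketch of the same computation on raw addresses (CARD_BYTES and min_fill_words stand in for G1BlockOffsetSharedArray::N_bytes and G1CollectedHeap::min_fill_size(); this is not the HotSpot code itself):

#include <algorithm>
#include <cstddef>
#include <cstdint>

const size_t CARD_BYTES = 512;           // assumed BOT card size
const size_t WORD_BYTES = sizeof(void*);

// Returns the number of words of dummy object to allocate, or 0 if the
// region is already card-aligned or too full to hold a filler.
size_t words_to_pad_to_card(uintptr_t top, uintptr_t end, size_t min_fill_words) {
  uintptr_t aligned_top = (top + CARD_BYTES - 1) & ~(uintptr_t)(CARD_BYTES - 1);
  size_t to_allocate = (aligned_top - top) / WORD_BYTES;
  if (to_allocate == 0) return 0;                  // already on a boundary
  size_t room = (end - top) / WORD_BYTES;          // words left in the region
  to_allocate = std::min(room, std::max(to_allocate, min_fill_words));
  return (to_allocate >= min_fill_words) ? to_allocate : 0;
}

With the last card fully occupied by a filler, remembered set scanning can no longer race with new allocations into that card, which is the situation the comment in the header hunk below describes. The header changes continue with the _allocation_context field that the preceding comment introduces.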
+ AllocationContext_t _allocation_context; + // It keeps track of the distinct number of regions that are used // for allocation in the active interval of this object, i.e., // between a call to init() and a call to release(). The count @@ -110,6 +113,10 @@ // else can allocate out of it. void retire(bool fill_up); + // After a region is allocated by alloc_new_region, this + // method is used to set it as the active alloc_region + void update_alloc_region(HeapRegion* alloc_region); + // Allocate a new active region and use it to perform a word_size // allocation. The force parameter will be passed on to // G1CollectedHeap::allocate_new_alloc_region() and tells it to try @@ -137,6 +144,9 @@ return (hr == _dummy_region) ? NULL : hr; } + void set_allocation_context(AllocationContext_t context) { _allocation_context = context; } + AllocationContext_t allocation_context() { return _allocation_context; } + uint count() { return _count; } // The following two are the building blocks for the allocation method. @@ -182,6 +192,40 @@ #endif // G1_ALLOC_REGION_TRACING }; +class MutatorAllocRegion : public G1AllocRegion { +protected: + virtual HeapRegion* allocate_new_region(size_t word_size, bool force); + virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); +public: + MutatorAllocRegion() + : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } +}; + +class SurvivorGCAllocRegion : public G1AllocRegion { +protected: + virtual HeapRegion* allocate_new_region(size_t word_size, bool force); + virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); +public: + SurvivorGCAllocRegion() + : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { } +}; + +class OldGCAllocRegion : public G1AllocRegion { +protected: + virtual HeapRegion* allocate_new_region(size_t word_size, bool force); + virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); +public: + OldGCAllocRegion() + : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } + + // This specialization of release() makes sure that the last card that has + // been allocated into has been completely filled by a dummy object. This + // avoids races when remembered set scanning wants to update the BOT of the + // last card in the retained old gc alloc region, and allocation threads + // allocating into that card at the same time. + virtual HeapRegion* release(); +}; + class ar_ext_msg : public err_msg { public: ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") { --- ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP #include "gc_implementation/g1/g1AllocRegion.hpp" +#include "gc_implementation/g1/heapRegion.inline.hpp" inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region, size_t word_size, --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP + +#include "memory/allocation.hpp" + +typedef unsigned char AllocationContext_t; + +class AllocationContext : AllStatic { +public: + // Currently used context + static AllocationContext_t current() { + return 0; + } + // System wide default context + static AllocationContext_t system() { + return 0; + } +}; + +class AllocationContextStats: public StackObj { +public: + inline void clear() { } + inline void update(bool full_gc) { } + inline void update_after_mark() { } + inline bool available() { return false; } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1Allocator.hpp" +#include "gc_implementation/g1/g1CollectedHeap.hpp" +#include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/heapRegion.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" + +void G1DefaultAllocator::init_mutator_alloc_region() { + assert(_mutator_alloc_region.get() == NULL, "pre-condition"); + _mutator_alloc_region.init(); +} + +void G1DefaultAllocator::release_mutator_alloc_region() { + _mutator_alloc_region.release(); + assert(_mutator_alloc_region.get() == NULL, "post-condition"); +} + +void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info, + OldGCAllocRegion* old, + HeapRegion** retained_old) { + HeapRegion* retained_region = *retained_old; + *retained_old = NULL; + + // We will discard the current GC alloc region if: + // a) it's in the collection set (it can happen!), + // b) it's already full (no point in using it), + // c) it's empty (this means that it was emptied during + // a cleanup and it should be on the free list now), or + // d) it's humongous (this means that it was emptied + // during a cleanup and was added to the free list, but + // has been subsequently used to allocate a humongous + // object that may be less than the region size). + if (retained_region != NULL && + !retained_region->in_collection_set() && + !(retained_region->top() == retained_region->end()) && + !retained_region->is_empty() && + !retained_region->isHumongous()) { + retained_region->record_top_and_timestamp(); + // The retained region was added to the old region set when it was + // retired. We have to remove it now, since we don't allow regions + // we allocate to in the region sets. We'll re-add it later, when + // it's retired again. + _g1h->_old_set.remove(retained_region); + bool during_im = _g1h->g1_policy()->during_initial_mark_pause(); + retained_region->note_start_of_copying(during_im); + old->set(retained_region); + _g1h->_hr_printer.reuse(retained_region); + evacuation_info.set_alloc_regions_used_before(retained_region->used()); + } +} + +void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { + assert_at_safepoint(true /* should_be_vm_thread */); + + _survivor_gc_alloc_region.init(); + _old_gc_alloc_region.init(); + reuse_retained_old_region(evacuation_info, + &_old_gc_alloc_region, + &_retained_old_gc_alloc_region); +} + +void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { + AllocationContext_t context = AllocationContext::current(); + evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() + + old_gc_alloc_region(context)->count()); + survivor_gc_alloc_region(context)->release(); + // If we have an old GC alloc region to release, we'll save it in + // _retained_old_gc_alloc_region. If we don't + // _retained_old_gc_alloc_region will become NULL. This is what we + // want either way so no reason to check explicitly for either + // condition. 
+ _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release(); + + if (ResizePLAB) { + _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers); + _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers); + } +} + +void G1DefaultAllocator::abandon_gc_alloc_regions() { + assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition"); + assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition"); + _retained_old_gc_alloc_region = NULL; +} + +G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : + ParGCAllocBuffer(gclab_word_size), _retired(true) { } + +HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) { + HeapWord* obj = NULL; + size_t gclab_word_size = _g1h->desired_plab_sz(purpose); + if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { + G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context); + add_to_alloc_buffer_waste(alloc_buf->words_remaining()); + alloc_buf->retire(false /* end_of_gc */, false /* retain */); + + HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context); + if (buf == NULL) { + return NULL; // Let caller handle allocation failure. + } + // Otherwise. + alloc_buf->set_word_size(gclab_word_size); + alloc_buf->set_buf(buf); + + obj = alloc_buf->allocate(word_sz); + assert(obj != NULL, "buffer was definitely big enough..."); + } else { + obj = _g1h->par_allocate_during_gc(purpose, word_sz, context); + } + return obj; +} + +G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) : + G1ParGCAllocator(g1h), + _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), + _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) { + + _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; + _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; + +} + +void G1DefaultParGCAllocator::retire_alloc_buffers() { + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { + size_t waste = _alloc_buffers[ap]->words_remaining(); + add_to_alloc_buffer_waste(waste); + _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), + true /* end_of_gc */, + false /* retain */); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP + +#include "gc_implementation/g1/g1AllocationContext.hpp" +#include "gc_implementation/g1/g1AllocRegion.hpp" +#include "gc_implementation/shared/parGCAllocBuffer.hpp" + +enum GCAllocPurpose { + GCAllocForTenured, + GCAllocForSurvived, + GCAllocPurposeCount +}; + +// Base class for G1 allocators. +class G1Allocator : public CHeapObj<mtGC> { + friend class VMStructs; +protected: + G1CollectedHeap* _g1h; + + // Outside of GC pauses, the number of bytes used in all regions other + // than the current allocation region. + size_t _summary_bytes_used; + +public: + G1Allocator(G1CollectedHeap* heap) : + _g1h(heap), _summary_bytes_used(0) { } + + static G1Allocator* create_allocator(G1CollectedHeap* g1h); + + virtual void init_mutator_alloc_region() = 0; + virtual void release_mutator_alloc_region() = 0; + + virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0; + virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0; + virtual void abandon_gc_alloc_regions() = 0; + + virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0; + virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0; + virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0; + virtual size_t used() = 0; + virtual bool is_retained_old_region(HeapRegion* hr) = 0; + + void reuse_retained_old_region(EvacuationInfo& evacuation_info, + OldGCAllocRegion* old, + HeapRegion** retained); + + size_t used_unlocked() const { + return _summary_bytes_used; + } + + void increase_used(size_t bytes) { + _summary_bytes_used += bytes; + } + + void decrease_used(size_t bytes) { + assert(_summary_bytes_used >= bytes, + err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT, + _summary_bytes_used, bytes)); + _summary_bytes_used -= bytes; + } + + void set_used(size_t bytes) { + _summary_bytes_used = bytes; + } + + virtual HeapRegion* new_heap_region(uint hrs_index, + G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr) { + return new HeapRegion(hrs_index, sharedOffsetArray, mr); + } +}; + +// The default allocator for G1. +class G1DefaultAllocator : public G1Allocator { +protected: + // Alloc region used to satisfy mutator allocation requests. + MutatorAllocRegion _mutator_alloc_region; + + // Alloc region used to satisfy allocation requests by the GC for + // survivor objects. + SurvivorGCAllocRegion _survivor_gc_alloc_region; + + // Alloc region used to satisfy allocation requests by the GC for + // old objects.
+ OldGCAllocRegion _old_gc_alloc_region; + + HeapRegion* _retained_old_gc_alloc_region; +public: + G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { } + + virtual void init_mutator_alloc_region(); + virtual void release_mutator_alloc_region(); + + virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info); + virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info); + virtual void abandon_gc_alloc_regions(); + + virtual bool is_retained_old_region(HeapRegion* hr) { + return _retained_old_gc_alloc_region == hr; + } + + virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) { + return &_mutator_alloc_region; + } + + virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) { + return &_survivor_gc_alloc_region; + } + + virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) { + return &_old_gc_alloc_region; + } + + virtual size_t used() { + assert(Heap_lock->owner() != NULL, + "Should be owned on this thread's behalf."); + size_t result = _summary_bytes_used; + + // Read only once in case it is set to NULL concurrently + HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get(); + if (hr != NULL) { + result += hr->used(); + } + return result; + } +}; + +class G1ParGCAllocBuffer: public ParGCAllocBuffer { +private: + bool _retired; + +public: + G1ParGCAllocBuffer(size_t gclab_word_size); + virtual ~G1ParGCAllocBuffer() { + guarantee(_retired, "Allocation buffer has not been retired"); + } + + virtual void set_buf(HeapWord* buf) { + ParGCAllocBuffer::set_buf(buf); + _retired = false; + } + + virtual void retire(bool end_of_gc, bool retain) { + if (_retired) { + return; + } + ParGCAllocBuffer::retire(end_of_gc, retain); + _retired = true; + } +}; + +class G1ParGCAllocator : public CHeapObj<mtGC> { + friend class G1ParScanThreadState; +protected: + G1CollectedHeap* _g1h; + + size_t _alloc_buffer_waste; + size_t _undo_waste; + + void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } + void add_to_undo_waste(size_t waste) { _undo_waste += waste; } + + HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context); + + virtual void retire_alloc_buffers() = 0; + virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0; + +public: + G1ParGCAllocator(G1CollectedHeap* g1h) : + _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) { + } + + static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h); + + size_t alloc_buffer_waste() { return _alloc_buffer_waste; } + size_t undo_waste() {return _undo_waste; } + + HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) { + HeapWord* obj = NULL; + if (purpose == GCAllocForSurvived) { + obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes); + } else { + obj = alloc_buffer(purpose, context)->allocate(word_sz); + } + if (obj != NULL) { + return obj; + } + return allocate_slow(purpose, word_sz, context); + } + + void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) { + if (alloc_buffer(purpose, context)->contains(obj)) { + assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1), + "should contain whole object"); + alloc_buffer(purpose, context)->undo_allocation(obj, word_sz); + } else { + CollectedHeap::fill_with_object(obj, word_sz); + add_to_undo_waste(word_sz); + }
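The allocate/undo_allocation pair just shown, together with allocate_slow in the .cpp file earlier, is the classic PLAB (parallel-local allocation buffer) protocol: bump-pointer allocate from a per-worker buffer; on overflow either retire the buffer and refill it (only when the unfinished request is small relative to ParallelGCBufferWastePct) or allocate the object directly; on undo, rewind if possible, otherwise count the bytes as waste (G1 fills such holes with a dummy object so the heap stays parseable). A self-contained bump-pointer sketch of the buffer half of that protocol (illustrative, not ParGCAllocBuffer):

#include <cstddef>

class PlabSketch {
  char*  _top;
  char*  _end;
  size_t _undo_waste_bytes;
public:
  PlabSketch(char* buf, size_t bytes)
    : _top(buf), _end(buf + bytes), _undo_waste_bytes(0) {}

  // Fast path: bump the pointer; NULL tells the caller to refill the
  // buffer or fall back to a direct, synchronized allocation.
  void* allocate(size_t bytes) {
    if ((size_t)(_end - _top) < bytes) return NULL;
    void* obj = _top;
    _top += bytes;
    return obj;
  }

  // Undo: only the most recent allocation can actually be rewound;
  // anything else stays in place and is accounted as waste.
  void undo(void* obj, size_t bytes) {
    if ((char*)obj + bytes == _top) {
      _top = (char*)obj;
    } else {
      _undo_waste_bytes += bytes;
    }
  }

  size_t undo_waste_bytes() const { return _undo_waste_bytes; }
};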
+ } +}; + +class G1DefaultParGCAllocator : public G1ParGCAllocator { + G1ParGCAllocBuffer _surviving_alloc_buffer; + G1ParGCAllocBuffer _tenured_alloc_buffer; + G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; + +public: + G1DefaultParGCAllocator(G1CollectedHeap* g1h); + + virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) { + return _alloc_buffers[purpose]; + } + + virtual void retire_alloc_buffers() ; +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1Allocator.hpp" +#include "gc_implementation/g1/g1CollectedHeap.hpp" + +G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) { + return new G1DefaultAllocator(g1h); +} + +G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) { + return new G1DefaultParGCAllocator(g1h); +} --- ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" +#include "gc_implementation/g1/heapRegion.hpp" #include "memory/space.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" @@ -35,60 +36,26 @@ // G1BlockOffsetSharedArray ////////////////////////////////////////////////////////////////////// -G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved, - size_t init_word_size) : - _reserved(reserved), _end(NULL) -{ - size_t size = compute_size(reserved.word_size()); - ReservedSpace rs(ReservedSpace::allocation_align_size_up(size)); - if (!rs.is_reserved()) { - vm_exit_during_initialization("Could not reserve enough space for heap offset array"); - } - if (!_vs.initialize(rs, 0)) { - vm_exit_during_initialization("Could not reserve enough space for heap offset array"); - } +G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) : + _reserved(), _end(NULL), _listener(), _offset_array(NULL) { - MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + _reserved = heap; + _end = NULL; - _offset_array = (u_char*)_vs.low_boundary(); - resize(init_word_size); + MemRegion 
bot_reserved = storage->reserved(); + + _offset_array = (u_char*)bot_reserved.start(); + _end = _reserved.end(); + + storage->set_mapping_changed_listener(&_listener); + if (TraceBlockOffsetTable) { gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "); gclog_or_tty->print_cr(" " " rs.base(): " INTPTR_FORMAT " rs.size(): " INTPTR_FORMAT " rs end(): " INTPTR_FORMAT, - rs.base(), rs.size(), rs.base() + rs.size()); - gclog_or_tty->print_cr(" " - " _vs.low_boundary(): " INTPTR_FORMAT - " _vs.high_boundary(): " INTPTR_FORMAT, - _vs.low_boundary(), - _vs.high_boundary()); - } -} - -void G1BlockOffsetSharedArray::resize(size_t new_word_size) { - assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved"); - size_t new_size = compute_size(new_word_size); - size_t old_size = _vs.committed_size(); - size_t delta; - char* high = _vs.high(); - _end = _reserved.start() + new_word_size; - if (new_size > old_size) { - delta = ReservedSpace::page_align_size_up(new_size - old_size); - assert(delta > 0, "just checking"); - if (!_vs.expand_by(delta)) { - // Do better than this for Merlin - vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); - } - assert(_vs.high() == high + delta, "invalid expansion"); - // Initialization of the contents is left to the - // G1BlockOffsetArray that uses it. - } else { - delta = ReservedSpace::page_align_size_down(old_size - new_size); - if (delta == 0) return; - _vs.shrink_by(delta); - assert(_vs.high() == high - delta, "invalid expansion"); + bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end()); } } @@ -98,28 +65,20 @@ return (delta & right_n_bits(LogN_words)) == (size_t)NoBits; } - ////////////////////////////////////////////////////////////////////// // G1BlockOffsetArray ////////////////////////////////////////////////////////////////////// G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array, - MemRegion mr, bool init_to_zero) : + MemRegion mr) : G1BlockOffsetTable(mr.start(), mr.end()), _unallocated_block(_bottom), - _array(array), _csp(NULL), - _init_to_zero(init_to_zero) { + _array(array), _gsp(NULL) { assert(_bottom <= _end, "arguments out of order"); - if (!_init_to_zero) { - // initialize cards to point back to mr.start() - set_remainder_to_point_to_start(mr.start() + N_words, mr.end()); - _array->set_offset_array(0, 0); // set first card to 0 - } } -void G1BlockOffsetArray::set_space(Space* sp) { - _sp = sp; - _csp = sp->toContiguousSpace(); +void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) { + _gsp = sp; } // The arguments follow the normal convention of denoting @@ -205,93 +164,6 @@ DEBUG_ONLY(check_all_cards(start_card, end_card);) } -// The block [blk_start, blk_end) has been allocated; -// adjust the block offset table to represent this information; -// right-open interval: [blk_start, blk_end) -void -G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) { - mark_block(blk_start, blk_end); - allocated(blk_start, blk_end); -} - -// Adjust BOT to show that a previously whole block has been split -// into two. -void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size, - size_t left_blk_size) { - // Verify that the BOT shows [blk, blk + blk_size) to be one block. - verify_single_block(blk, blk_size); - // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size) - // is one single block. - mark_block(blk + left_blk_size, blk + blk_size); -} - - -// Action_mark - update the BOT for the block [blk_start, blk_end). 
-// Current typical use is for splitting a block. -// Action_single - update the BOT for an allocation. -// Action_verify - BOT verification. -void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start, - HeapWord* blk_end, - Action action) { - assert(Universe::heap()->is_in_reserved(blk_start), - "reference must be into the heap"); - assert(Universe::heap()->is_in_reserved(blk_end-1), - "limit must be within the heap"); - // This is optimized to make the test fast, assuming we only rarely - // cross boundaries. - uintptr_t end_ui = (uintptr_t)(blk_end - 1); - uintptr_t start_ui = (uintptr_t)blk_start; - // Calculate the last card boundary preceding end of blk - intptr_t boundary_before_end = (intptr_t)end_ui; - clear_bits(boundary_before_end, right_n_bits(LogN)); - if (start_ui <= (uintptr_t)boundary_before_end) { - // blk starts at or crosses a boundary - // Calculate index of card on which blk begins - size_t start_index = _array->index_for(blk_start); - // Index of card on which blk ends - size_t end_index = _array->index_for(blk_end - 1); - // Start address of card on which blk begins - HeapWord* boundary = _array->address_for_index(start_index); - assert(boundary <= blk_start, "blk should start at or after boundary"); - if (blk_start != boundary) { - // blk starts strictly after boundary - // adjust card boundary and start_index forward to next card - boundary += N_words; - start_index++; - } - assert(start_index <= end_index, "monotonicity of index_for()"); - assert(boundary <= (HeapWord*)boundary_before_end, "tautology"); - switch (action) { - case Action_mark: { - if (init_to_zero()) { - _array->set_offset_array(start_index, boundary, blk_start); - break; - } // Else fall through to the next case - } - case Action_single: { - _array->set_offset_array(start_index, boundary, blk_start); - // We have finished marking the "offset card". We need to now - // mark the subsequent cards that this blk spans. - if (start_index < end_index) { - HeapWord* rem_st = _array->address_for_index(start_index) + N_words; - HeapWord* rem_end = _array->address_for_index(end_index) + N_words; - set_remainder_to_point_to_start(rem_st, rem_end); - } - break; - } - case Action_check: { - _array->check_offset_array(start_index, boundary, blk_start); - // We have finished checking the "offset card". We need to now - // check the subsequent cards that this blk spans. - check_all_cards(start_index + 1, end_index); - break; - } - default: - ShouldNotReachHere(); - } - } -} - // The card-interval [start_card, end_card] is a closed interval; this // is an expensive check -- use with care and only under protection of // suitable flag. @@ -330,25 +202,6 @@ } } -// The range [blk_start, blk_end) represents a single contiguous block -// of storage; modify the block offset table to represent this -// information; Right-open interval: [blk_start, blk_end) -// NOTE: this method does _not_ adjust _unallocated_block. -void -G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) { - do_block_internal(blk_start, blk_end, Action_single); -} - -// Mark the BOT such that if [blk_start, blk_end) straddles a card -// boundary, the card following the first such boundary is marked -// with the appropriate offset. -// NOTE: this method does _not_ adjust _unallocated_block or -// any cards subsequent to the first one. 
-void -G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) { - do_block_internal(blk_start, blk_end, Action_mark); -} - HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) { assert(_bottom <= addr && addr < _end, "addr must be covered by this Array"); @@ -378,7 +231,7 @@ } // Otherwise, find the block start using the table. HeapWord* q = block_at_or_preceding(addr, false, 0); - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); return forward_to_block_containing_addr_const(q, n, addr); } @@ -406,86 +259,28 @@ err_msg("next_boundary is beyond the end of the covered region " " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT, next_boundary, _array->_end)); - if (csp() != NULL) { - if (addr >= csp()->top()) return csp()->top(); - while (next_boundary < addr) { - while (n <= next_boundary) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += obj->size(); - } - assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); - // [q, n) is the block that crosses the boundary. - alloc_block_work2(&next_boundary, &next_index, q, n); + if (addr >= gsp()->top()) return gsp()->top(); + while (next_boundary < addr) { + while (n <= next_boundary) { + q = n; + oop obj = oop(q); + if (obj->klass_or_null() == NULL) return q; + n += block_size(q); } - } else { - while (next_boundary < addr) { - while (n <= next_boundary) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += _sp->block_size(q); - } - assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); - // [q, n) is the block that crosses the boundary. - alloc_block_work2(&next_boundary, &next_index, q, n); - } + assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); + // [q, n) is the block that crosses the boundary. + alloc_block_work2(&next_boundary, &next_index, q, n); } return forward_to_block_containing_addr_const(q, n, addr); } -HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const { - assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); - - assert(_bottom <= addr && addr < _end, - "addr must be covered by this Array"); - // Must read this exactly once because it can be modified by parallel - // allocation. - HeapWord* ub = _unallocated_block; - if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { - assert(ub < _end, "tautology (see above)"); - return ub; - } - - // Otherwise, find the block start using the table, but taking - // care (cf block_start_unsafe() above) not to parse any objects/blocks - // on the cards themselves. - size_t index = _array->index_for(addr); - assert(_array->address_for_index(index) == addr, - "arg should be start of card"); - - HeapWord* q = (HeapWord*)addr; - uint offset; - do { - offset = _array->offset_array(index--); - q -= offset; - } while (offset == N_words); - assert(q <= addr, "block start should be to left of arg"); - return q; -} - // Note that the committed size of the covered space may have changed, // so the table size might also wish to change.
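The removed block_start_careful above still shows the core BOT lookup most clearly: each card's byte holds the distance, in words, from that card's boundary back toward the start of the block that covers it, saturated at N_words, so walking back while entries stay saturated must land on the block start. A sketch of that backward walk (constants are this sketch's, not HotSpot's; entry 0 is assumed to be 0, as the removed assert requires, so the loop always terminates):

#include <cstddef>

const unsigned char N_WORDS = 64; // words per 512-byte card on a 64-bit build

// offset_array[i] == N_WORDS means "the block starts at least one full
// card further back"; any smaller value is the exact remaining distance.
size_t block_start_word(const unsigned char* offset_array,
                        size_t card_index,
                        size_t card_boundary_word) {
  size_t q = card_boundary_word;
  unsigned char offset;
  do {
    offset = offset_array[card_index--];
    q -= offset;
  } while (offset == N_WORDS);
  return q; // index of the first word of the covering block
}

set_remainder_to_point_to_start, seen earlier in this file, is the writer side of this scheme: it fills in the entries for the cards a large block spans so that the walk above can find its way back.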
void G1BlockOffsetArray::resize(size_t new_word_size) { HeapWord* new_end = _bottom + new_word_size; - if (_end < new_end && !init_to_zero()) { - // verify that the old and new boundaries are also card boundaries - assert(_array->is_card_boundary(_end), - "_end not a card boundary"); - assert(_array->is_card_boundary(new_end), - "new _end would not be a card boundary"); - // set all the newly added cards - _array->set_offset_array(_end, new_end, N_words); - } _end = new_end; // update _end } -void G1BlockOffsetArray::set_region(MemRegion mr) { - _bottom = mr.start(); - _end = mr.end(); -} - // // threshold_ // | _index_ @@ -638,19 +433,38 @@ assert(_bottom <= addr && addr < _end, "addr must be covered by this Array"); HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); return forward_to_block_containing_addr_const(q, n, addr); } G1BlockOffsetArrayContigSpace:: G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr) : - G1BlockOffsetArray(array, mr, true) + G1BlockOffsetArray(array, mr) { _next_offset_threshold = NULL; _next_offset_index = 0; } +HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + _next_offset_index = _array->index_for_raw(_bottom); + _next_offset_index++; + _next_offset_threshold = + _array->address_for_index_raw(_next_offset_index); + return _next_offset_threshold; +} + +void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + size_t bottom_index = _array->index_for_raw(_bottom); + assert(_array->address_for_index_raw(bottom_index) == _bottom, + "Precondition of call"); + _array->set_offset_array_raw(bottom_index, 0); +} + HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() { assert(!Universe::heap()->is_in_reserved(_array->_offset_array), "just checking"); @@ -661,22 +475,12 @@ return _next_offset_threshold; } -void G1BlockOffsetArrayContigSpace::zero_bottom_entry() { - assert(!Universe::heap()->is_in_reserved(_array->_offset_array), - "just checking"); - size_t bottom_index = _array->index_for(_bottom); - assert(_array->address_for_index(bottom_index) == _bottom, - "Precondition of call"); - _array->set_offset_array(bottom_index, 0); -} - void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) { assert(new_top <= _end, "_end should have already been updated"); // The first BOT entry should have offset 0. - zero_bottom_entry(); - initialize_threshold(); + reset_bot(); alloc_block(_bottom, new_top); } --- ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/memRegion.hpp" #include "runtime/virtualspace.hpp" #include "utilities/globalDefinitions.hpp" @@ -52,8 +53,8 @@ // consolidation. 
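Before the class definitions that follow, it helps to have the card geometry in one place: LogN = 9, so one BOT slot covers a 512-byte card, and index_for / address_for_index are inverse shift operations, with compute_size giving one slot per card of the covered region. A sketch of that arithmetic with uintptr_t standing in for HeapWord* (constants mirror the SomePublicConstants enum further down, assuming a 64-bit build):

#include <cstddef>
#include <cstdint>

const unsigned LOG_N = 9;                        // log2(card size in bytes)
const unsigned LOG_WORD = (sizeof(void*) == 8) ? 3 : 2;
const unsigned LOG_N_WORDS = LOG_N - LOG_WORD;   // log2(card size in words)

size_t index_for(uintptr_t reserved_start, uintptr_t p) {
  return (p - reserved_start) >> LOG_N;          // byte delta -> card index
}

uintptr_t address_for_index(uintptr_t reserved_start, size_t index) {
  return reserved_start + (index << LOG_N);      // card index -> card base
}

size_t compute_slots(size_t region_words) {
  return region_words >> LOG_N_WORDS;            // one u_char slot per card
}

Note that the patched compute_size in this header drops the old "+1 extra slot" and page-align rounding in favor of allocation-granularity rounding, since the backing storage now comes from a G1RegionToSpaceMapper rather than a private VirtualSpace.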
// Forward declarations -class ContiguousSpace; class G1BlockOffsetSharedArray; +class G1OffsetTableContigSpace; class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC { friend class VMStructs; @@ -106,6 +107,16 @@ inline HeapWord* block_start_const(const void* addr) const; }; +class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener { + public: + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) { + // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot + // retrieve it here since this would cause firing of several asserts. The code + // executed after commit of a region already needs to do some re-initialization of + // the HeapRegion, so we combine that. + } +}; + // This implementation of "G1BlockOffsetTable" divides the covered region // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry // for each such subregion indicates how far back one must go to find the @@ -125,6 +136,7 @@ friend class VMStructs; private: + G1BlockOffsetSharedArrayMappingChangedListener _listener; // The reserved region covered by the shared array. MemRegion _reserved; @@ -133,16 +145,8 @@ // Array for keeping offsets for retrieving object start fast given an // address. - VirtualSpace _vs; u_char* _offset_array; // byte array keeping backwards offsets - void check_index(size_t index, const char* msg) const { - assert(index < _vs.committed_size(), - err_msg("%s - " - "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT, - msg, index, _vs.committed_size())); - } - void check_offset(size_t offset, const char* msg) const { assert(offset <= N_words, err_msg("%s - " @@ -152,76 +156,29 @@ // Bounds checking accessors: // For performance these have to devolve to array accesses in product builds. 
- u_char offset_array(size_t index) const { - check_index(index, "index out of range"); - return _offset_array[index]; - } + inline u_char offset_array(size_t index) const; - void set_offset_array(size_t index, u_char offset) { - check_index(index, "index out of range"); - check_offset(offset, "offset too large"); + void set_offset_array_raw(size_t index, u_char offset) { _offset_array[index] = offset; } - void set_offset_array(size_t index, HeapWord* high, HeapWord* low) { - check_index(index, "index out of range"); - assert(high >= low, "addresses out of order"); - check_offset(pointer_delta(high, low), "offset too large"); - _offset_array[index] = (u_char) pointer_delta(high, low); - } + inline void set_offset_array(size_t index, u_char offset); - void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { - check_index(index_for(right - 1), "right address out of range"); - assert(left < right, "Heap addresses out of order"); - size_t num_cards = pointer_delta(right, left) >> LogN_words; - if (UseMemSetInBOT) { - memset(&_offset_array[index_for(left)], offset, num_cards); - } else { - size_t i = index_for(left); - const size_t end = i + num_cards; - for (; i < end; i++) { - _offset_array[i] = offset; - } - } - } + inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low); - void set_offset_array(size_t left, size_t right, u_char offset) { - check_index(right, "right index out of range"); - assert(left <= right, "indexes out of order"); - size_t num_cards = right - left + 1; - if (UseMemSetInBOT) { - memset(&_offset_array[left], offset, num_cards); - } else { - size_t i = left; - const size_t end = i + num_cards; - for (; i < end; i++) { - _offset_array[i] = offset; - } - } - } - - void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const { - check_index(index, "index out of range"); - assert(high >= low, "addresses out of order"); - check_offset(pointer_delta(high, low), "offset too large"); - assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset"); - } + inline void set_offset_array(size_t left, size_t right, u_char offset); bool is_card_boundary(HeapWord* p) const; +public: + // Return the number of slots needed for an offset array // that covers mem_region_words words. - // We always add an extra slot because if an object - // ends on a card boundary we put a 0 in the next - // offset array slot, so we want that slot always - // to be reserved. - - size_t compute_size(size_t mem_region_words) { - size_t number_of_slots = (mem_region_words / N_words) + 1; - return ReservedSpace::page_align_size_up(number_of_slots); + static size_t compute_size(size_t mem_region_words) { + size_t number_of_slots = (mem_region_words / N_words); + return ReservedSpace::allocation_align_size_up(number_of_slots); } -public: enum SomePublicConstants { LogN = 9, LogN_words = LogN - LogHeapWordSize, @@ -235,25 +192,19 @@ // least "init_word_size".) The contents of the initial table are // undefined; it is the responsibility of the constituent // G1BlockOffsetTable(s) to initialize cards. - G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size); - - // Notes a change in the committed size of the region covered by the - // table. The "new_word_size" may not be larger than the size of the - // reserved region this table covers. - void resize(size_t new_word_size); - - void set_bottom(HeapWord* new_bottom); - - // Updates all the BlockOffsetArray's sharing this shared array to - // reflect the current "top"'s of their spaces. 
- void update_offset_arrays(); + G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage); // Return the appropriate index into "_offset_array" for "p". inline size_t index_for(const void* p) const; + inline size_t index_for_raw(const void* p) const; // Return the address indicating the start of the region corresponding to // "index" in "_offset_array". inline HeapWord* address_for_index(size_t index) const; + // Variant of address_for_index that does not check the index for validity. + inline HeapWord* address_for_index_raw(size_t index) const { + return _reserved.start() + (index << LogN_words); + } }; // And here is the G1BlockOffsetTable subtype that uses the array. @@ -268,28 +219,12 @@ LogN = G1BlockOffsetSharedArray::LogN }; - // The following enums are used by do_block_helper - enum Action { - Action_single, // BOT records a single block (see single_block()) - Action_mark, // BOT marks the start of a block (see mark_block()) - Action_check // Check that BOT records block correctly - // (see verify_single_block()). - }; - // This is the array, which can be shared by several BlockOffsetArray's // servicing different G1BlockOffsetSharedArray* _array; // The space that owns this subregion. - Space* _sp; - - // If "_sp" is a contiguous space, the field below is the view of "_sp" - // as a contiguous space, else NULL. - ContiguousSpace* _csp; - - // If true, array entries are initialized to 0; otherwise, they are - // initialized to point backwards to the beginning of the covered region. - bool _init_to_zero; + G1OffsetTableContigSpace* _gsp; // The portion [_unallocated_block, _sp.end()) of the space that // is a single block known not to contain any objects. @@ -305,12 +240,11 @@ // that is closed: [start_index, end_index] void set_remainder_to_point_to_start_incl(size_t start, size_t end); - // A helper function for BOT adjustment/verification work - void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action); - protected: - ContiguousSpace* csp() const { return _csp; } + G1OffsetTableContigSpace* gsp() const { return _gsp; } + + inline size_t block_size(const HeapWord* p) const; // Returns the address of a block whose start is at most "addr". // If "has_max_index" is true, "assumes "max_index" is the last valid one @@ -353,126 +287,29 @@ public: // The space may not have it's bottom and top set yet, which is why the - // region is passed as a parameter. If "init_to_zero" is true, the - // elements of the array are initialized to zero. Otherwise, they are - // initialized to point backwards to the beginning. - G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr, - bool init_to_zero); + // region is passed as a parameter. The elements of the array are + // initialized to zero. + G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr); // Note: this ought to be part of the constructor, but that would require // "this" to be passed as a parameter to a member constructor for // the containing concrete subtype of Space. // This would be legal C++, but MS VC++ doesn't allow it. - void set_space(Space* sp); - - // Resets the covered region to the given "mr". - void set_region(MemRegion mr); + void set_space(G1OffsetTableContigSpace* sp); // Resets the covered region to one with the same _bottom as before but // the "new_word_size". void resize(size_t new_word_size); - // These must be guaranteed to work properly (i.e., do nothing) - // when "blk_start" ("blk" for second version) is "NULL". 
- virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end); - virtual void alloc_block(HeapWord* blk, size_t size) { - alloc_block(blk, blk + size); - } - - // The following methods are useful and optimized for a - // general, non-contiguous space. - - // Given a block [blk_start, blk_start + full_blk_size), and - // a left_blk_size < full_blk_size, adjust the BOT to show two - // blocks [blk_start, blk_start + left_blk_size) and - // [blk_start + left_blk_size, blk_start + full_blk_size). - // It is assumed (and verified in the non-product VM) that the - // BOT was correct for the original block. - void split_block(HeapWord* blk_start, size_t full_blk_size, - size_t left_blk_size); - - // Adjust the BOT to show that it has a single block in the - // range [blk_start, blk_start + size). All necessary BOT - // cards are adjusted, but _unallocated_block isn't. - void single_block(HeapWord* blk_start, HeapWord* blk_end); - void single_block(HeapWord* blk, size_t size) { - single_block(blk, blk + size); - } - - // Adjust BOT to show that it has a block in the range - // [blk_start, blk_start + size). Only the first card - // of BOT is touched. It is assumed (and verified in the - // non-product VM) that the remaining cards of the block - // are correct. - void mark_block(HeapWord* blk_start, HeapWord* blk_end); - void mark_block(HeapWord* blk, size_t size) { - mark_block(blk, blk + size); - } - - // Adjust _unallocated_block to indicate that a particular - // block has been newly allocated or freed. It is assumed (and - // verified in the non-product VM) that the BOT is correct for - // the given block. - inline void allocated(HeapWord* blk_start, HeapWord* blk_end) { - // Verify that the BOT shows [blk, blk + blk_size) to be one block. - verify_single_block(blk_start, blk_end); - if (BlockOffsetArrayUseUnallocatedBlock) { - _unallocated_block = MAX2(_unallocated_block, blk_end); - } - } - - inline void allocated(HeapWord* blk, size_t size) { - allocated(blk, blk + size); - } - - inline void freed(HeapWord* blk_start, HeapWord* blk_end); - - inline void freed(HeapWord* blk, size_t size); - virtual HeapWord* block_start_unsafe(const void* addr); virtual HeapWord* block_start_unsafe_const(const void* addr) const; - // Requires "addr" to be the start of a card and returns the - // start of the block that contains the given address. - HeapWord* block_start_careful(const void* addr) const; - - // If true, initialize array slots with no allocated blocks to zero. - // Otherwise, make them point back to the front. - bool init_to_zero() { return _init_to_zero; } - - // Verification & debugging - ensure that the offset table reflects the fact - // that the block [blk_start, blk_end) or [blk, blk + size) is a - // single block of storage. NOTE: can;t const this because of - // call to non-const do_block_internal() below. - inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) { - if (VerifyBlockOffsetArray) { - do_block_internal(blk_start, blk_end, Action_check); - } - } - - inline void verify_single_block(HeapWord* blk, size_t size) { - verify_single_block(blk, blk + size); - } - // Used by region verification. Checks that the contents of the // BOT reflect that there's a single object that spans the address // range [obj_start, obj_start + word_size); returns true if this is // the case, returns false if it's not. 
bool verify_for_object(HeapWord* obj_start, size_t word_size) const; - // Verify that the given block is before _unallocated_block - inline void verify_not_unallocated(HeapWord* blk_start, - HeapWord* blk_end) const { - if (BlockOffsetArrayUseUnallocatedBlock) { - assert(blk_start < blk_end, "Block inconsistency?"); - assert(blk_end <= _unallocated_block, "_unallocated_block problem"); - } - } - - inline void verify_not_unallocated(HeapWord* blk, size_t size) const { - verify_not_unallocated(blk, blk + size); - } - void check_all_cards(size_t left_card, size_t right_card) const; virtual void print_on(outputStream* out) PRODUCT_RETURN; @@ -495,6 +332,12 @@ blk_start, blk_end); } + // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the + // memory first. + void zero_bottom_entry_raw(); + // Variant of initialize_threshold that does not check for availability of the + // memory first. + HeapWord* initialize_threshold_raw(); public: G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr); @@ -502,8 +345,10 @@ // bottom of the covered region. HeapWord* initialize_threshold(); - // Zero out the entry for _bottom (offset will be zero). - void zero_bottom_entry(); + void reset_bot() { + zero_bottom_entry_raw(); + initialize_threshold_raw(); + } // Return the next threshold, the point at which the table should be // updated. --- ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,8 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP #include "gc_implementation/g1/g1BlockOffsetTable.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/heapRegion.inline.hpp" #include "memory/space.hpp" inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { @@ -45,14 +47,62 @@ } } +#define check_index(index, msg) \ + assert((index) < (_reserved.word_size() >> LogN_words), \ + err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \ + msg, (index), (_reserved.word_size() >> LogN_words))); \ + assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)), \ + err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \ + " (%u) is not in committed area.", \ + (index), \ + p2i(address_for_index_raw(index)), \ + G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index)))); + +u_char G1BlockOffsetSharedArray::offset_array(size_t index) const { + check_index(index, "index out of range"); + return _offset_array[index]; +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) { + check_index(index, "index out of range"); + set_offset_array_raw(index, offset); +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) { + check_index(index, "index out of range"); + assert(high >= low, "addresses out of order"); + size_t offset = pointer_delta(high, low); + check_offset(offset, "offset too large"); + set_offset_array(index, (u_char)offset); +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) { + check_index(right, "right index out of range"); + assert(left <= right, "indexes out of order"); + size_t num_cards = right - left + 1; + if (UseMemSetInBOT) { + memset(&_offset_array[left], offset, num_cards); + } else { + size_t i = left; + const size_t end = i + num_cards; + for (; 
i < end; i++) { + _offset_array[i] = offset; + } + } +} + +// Variant of index_for that does not check the index for validity. +inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const { + return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN; +} + inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const { char* pc = (char*)p; assert(pc >= (char*)_reserved.start() && pc < (char*)_reserved.end(), err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")", p2i(p), p2i(_reserved.start()), p2i(_reserved.end()))); - size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); - size_t result = delta >> LogN; + size_t result = index_for_raw(p); check_index(result, "bad index from address"); return result; } @@ -60,7 +110,7 @@ inline HeapWord* G1BlockOffsetSharedArray::address_for_index(size_t index) const { check_index(index, "index out of range"); - HeapWord* result = _reserved.start() + (index << LogN_words); + HeapWord* result = address_for_index_raw(index); assert(result >= _reserved.start() && result < _reserved.end(), err_msg("bad address from index result " PTR_FORMAT " _reserved.start() " PTR_FORMAT " _reserved.end() " @@ -69,6 +119,13 @@ return result; } +#undef check_index + +inline size_t +G1BlockOffsetArray::block_size(const HeapWord* p) const { + return gsp()->block_size(p); +} + inline HeapWord* G1BlockOffsetArray::block_at_or_preceding(const void* addr, bool has_max_index, @@ -88,7 +145,7 @@ // to go back by. size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset); q -= (N_words * n_cards_back); - assert(q >= _sp->bottom(), "Went below bottom!"); + assert(q >= gsp()->bottom(), "Went below bottom!"); index -= n_cards_back; offset = _array->offset_array(index); } @@ -101,21 +158,12 @@ G1BlockOffsetArray:: forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, const void* addr) const { - if (csp() != NULL) { - if (addr >= csp()->top()) return csp()->top(); - while (n <= addr) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += obj->size(); - } - } else { - while (n <= addr) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += _sp->block_size(q); - } + if (addr >= gsp()->top()) return gsp()->top(); + while (n <= addr) { + q = n; + oop obj = oop(q); + if (obj->klass_or_null() == NULL) return q; + n += block_size(q); } assert(q <= n, "wrong order for q and addr"); assert(addr < n, "wrong order for addr and n"); @@ -126,7 +174,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q, const void* addr) { if (oop(q)->klass_or_null() == NULL) return q; - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); // In the normal case, where the query "addr" is a card boundary, and the // offset table chunks are the same size as cards, the block starting at // "q" will contain addr, so the test below will fail, and we'll fall @@ -138,28 +186,4 @@ return q; } -////////////////////////////////////////////////////////////////////////// -// BlockOffsetArrayNonContigSpace inlines -////////////////////////////////////////////////////////////////////////// -inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) { - // Verify that the BOT shows [blk_start, blk_end) to be one block. 
- verify_single_block(blk_start, blk_end); - // adjust _unallocated_block upward or downward - // as appropriate - if (BlockOffsetArrayUseUnallocatedBlock) { - assert(_unallocated_block <= _end, - "Inconsistent value for _unallocated_block"); - if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) { - // CMS-specific note: a block abutting _unallocated_block to - // its left is being freed, a new block is being added or - // we are resetting following a compaction - _unallocated_block = blk_start; - } - } -} - -inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) { - freed(blk, blk + size); -} - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,31 +33,29 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) { + if (zero_filled) { + return; + } + MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords); + _counts->clear_range(mr); +} + void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { if (has_count_table()) { - assert(from_card_num >= 0 && from_card_num < _committed_max_card_num, - err_msg("from card num out of range: "SIZE_FORMAT, from_card_num)); assert(from_card_num < to_card_num, err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT, from_card_num, to_card_num)); - assert(to_card_num <= _committed_max_card_num, - err_msg("to card num out of range: " - "to: "SIZE_FORMAT ", " - "max: "SIZE_FORMAT, - to_card_num, _committed_max_card_num)); - - to_card_num = MIN2(_committed_max_card_num, to_card_num); - Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num)); } } G1CardCounts::G1CardCounts(G1CollectedHeap *g1h): - _g1h(g1h), _card_counts(NULL), - _reserved_max_card_num(0), _committed_max_card_num(0), - _committed_size(0) {} + _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) { + _listener.set_cardcounts(this); +} -void G1CardCounts::initialize() { +void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) { assert(_g1h->max_capacity() > 0, "initialization order"); assert(_g1h->capacity() == 0, "initialization order"); @@ -70,70 +68,9 @@ _ct_bs = _g1h->g1_barrier_set(); _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); - // Allocate/Reserve the counts table - size_t reserved_bytes = _g1h->max_capacity(); - _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift; - - size_t reserved_size = _reserved_max_card_num * sizeof(jbyte); - ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size)); - if (!rs.is_reserved()) { - warning("Could not reserve enough space for the card counts table"); - guarantee(!has_reserved_count_table(), "should be NULL"); - return; - } - - MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); - - _card_counts_storage.initialize(rs, 0); - _card_counts = (jubyte*) _card_counts_storage.low(); - } -} - -void G1CardCounts::resize(size_t heap_capacity) { - // Expand the card counts table to handle a heap with the given capacity. - - if (!has_reserved_count_table()) { - // Don't expand if we failed to reserve the card counts table. 
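G1CardCountsMappingChangedListener::on_commit at the top of this hunk only acts when freshly committed pages are not known to be zero-filled. The observer shape, reduced to a runnable sketch (all names here are invented for illustration):

    #include <cstddef>
    #include <cstring>

    // Invented illustration of the commit-listener idea: the mapper notifies
    // a listener whenever a range of regions is committed, and the listener
    // only does work when the backing pages are not already zero-filled.
    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      virtual void on_commit(unsigned start_idx, size_t num_regions, bool zero_filled) = 0;
    };

    class CountsResetListener : public MappingChangedListener {
      unsigned char* _counts;       // one byte per card
      size_t _cards_per_region;
     public:
      CountsResetListener(unsigned char* counts, size_t cards_per_region)
        : _counts(counts), _cards_per_region(cards_per_region) {}

      virtual void on_commit(unsigned start_idx, size_t num_regions, bool zero_filled) {
        if (zero_filled) {
          return;  // fresh pages are already zero; nothing to clear
        }
        std::memset(_counts + (size_t)start_idx * _cards_per_region, 0,
                    num_regions * _cards_per_region);
      }
    };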
- return; - } - - assert(_committed_size == - ReservedSpace::allocation_align_size_up(_committed_size), - err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size)); - - // Verify that the committed space for the card counts matches our - // committed max card num. Note for some allocation alignments, the - // amount of space actually committed for the counts table will be able - // to span more cards than the number spanned by the maximum heap. - size_t prev_committed_size = _committed_size; - size_t prev_committed_card_num = committed_to_card_num(prev_committed_size); - - assert(prev_committed_card_num == _committed_max_card_num, - err_msg("Card mismatch: " - "prev: " SIZE_FORMAT ", " - "committed: "SIZE_FORMAT", " - "reserved: "SIZE_FORMAT, - prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num)); - - size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte); - size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size); - size_t new_committed_card_num = committed_to_card_num(new_committed_size); - - if (_committed_max_card_num < new_committed_card_num) { - // we need to expand the backing store for the card counts - size_t expand_size = new_committed_size - prev_committed_size; - - if (!_card_counts_storage.expand_by(expand_size)) { - warning("Card counts table backing store commit failure"); - return; - } - assert(_card_counts_storage.committed_size() == new_committed_size, - "expansion commit failure"); - - _committed_size = new_committed_size; - _committed_max_card_num = new_committed_card_num; - - clear_range(prev_committed_card_num, _committed_max_card_num); + _card_counts = (jubyte*) mapper->reserved().start(); + _reserved_max_card_num = mapper->reserved().byte_size(); + mapper->set_mapping_changed_listener(&_listener); } } @@ -149,12 +86,13 @@ uint count = 0; if (has_count_table()) { size_t card_num = ptr_2_card_num(card_ptr); - if (card_num < _committed_max_card_num) { - count = (uint) _card_counts[card_num]; - if (count < G1ConcRSHotCardLimit) { - _card_counts[card_num] = - (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); - } + assert(card_num < _reserved_max_card_num, + err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")", + card_num, _reserved_max_card_num)); + count = (uint) _card_counts[card_num]; + if (count < G1ConcRSHotCardLimit) { + _card_counts[card_num] = + (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); } } return count; @@ -165,31 +103,23 @@ } void G1CardCounts::clear_region(HeapRegion* hr) { - assert(!hr->isHumongous(), "Should have been cleared"); + MemRegion mr(hr->bottom(), hr->end()); + clear_range(mr); +} + +void G1CardCounts::clear_range(MemRegion mr) { if (has_count_table()) { - HeapWord* bottom = hr->bottom(); - - // We use the last address in hr as hr could be the - // last region in the heap. In which case trying to find - // the card for hr->end() will be an OOB accesss to the - // card table. 
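The reworked add_card_count above replaces the old committed-size juggling with a plain saturating byte counter clamped at G1ConcRSHotCardLimit. In isolation the idiom looks like this (the limit value is illustrative only):

    #include <cstddef>
    #include <cstdint>

    // Saturating increment: returns the pre-increment count and never lets
    // the stored value exceed the limit. kHotCardLimit stands in for
    // G1ConcRSHotCardLimit.
    static const unsigned kHotCardLimit = 4;

    unsigned add_card_count(uint8_t* counts, size_t card_num) {
      unsigned count = counts[card_num];
      if (count < kHotCardLimit) {
        counts[card_num] = (uint8_t)(count + 1);  // saturates at kHotCardLimit
      }
      return count;  // caller compares against the limit to decide "hot"
    }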
- HeapWord* last = hr->end() - 1; - assert(_g1h->g1_committed().contains(last), - err_msg("last not in committed: " - "last: " PTR_FORMAT ", " - "committed: [" PTR_FORMAT ", " PTR_FORMAT ")", - last, - _g1h->g1_committed().start(), - _g1h->g1_committed().end())); - - const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom); - const jbyte* last_card_ptr = _ct_bs->byte_for_const(last); + const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start()); + // We use the last address in the range as the range could represent the + // last region in the heap. In which case trying to find the card will be an + // OOB access to the card table. + const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last()); #ifdef ASSERT HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr); - assert(start_addr == hr->bottom(), "alignment"); + assert(start_addr == mr.start(), "MemRegion start must be aligned to a card."); HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr); - assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment"); + assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card."); #endif // ASSERT // Clear the counts for the (exclusive) card range. @@ -199,14 +129,22 @@ } } +class G1CardCountsClearClosure : public HeapRegionClosure { + private: + G1CardCounts* _card_counts; + public: + G1CardCountsClearClosure(G1CardCounts* card_counts) : + HeapRegionClosure(), _card_counts(card_counts) { } + + + virtual bool doHeapRegion(HeapRegion* r) { + _card_counts->clear_region(r); + return false; + } +}; + void G1CardCounts::clear_all() { assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise"); - clear_range((size_t)0, _committed_max_card_num); + G1CardCountsClearClosure cl(this); + _g1h->heap_region_iterate(&cl); } - -G1CardCounts::~G1CardCounts() { - if (has_reserved_count_table()) { - _card_counts_storage.release(); - } -} - --- ./hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,14 +25,26 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/allocation.hpp" #include "runtime/virtualspace.hpp" #include "utilities/globalDefinitions.hpp" class CardTableModRefBS; +class G1CardCounts; class G1CollectedHeap; +class G1RegionToSpaceMapper; class HeapRegion; +class G1CardCountsMappingChangedListener : public G1MappingChangedListener { + private: + G1CardCounts* _counts; + public: + void set_cardcounts(G1CardCounts* counts) { _counts = counts; } + + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); +}; + // Table to track the number of times a card has been refined. Once // a card has been refined a certain number of times, it is // considered 'hot' and its refinement is delayed by inserting the @@ -41,6 +53,8 @@ // is 'drained' during the next evacuation pause. class G1CardCounts: public CHeapObj { + G1CardCountsMappingChangedListener _listener; + G1CollectedHeap* _g1h; // The table of counts @@ -49,27 +63,18 @@ // Max capacity of the reserved space for the counts table size_t _reserved_max_card_num; - // Max capacity of the committed space for the counts table - size_t _committed_max_card_num; - - // Size of committed space for the counts table - size_t _committed_size; - // CardTable bottom. 
const jbyte* _ct_bot; // Barrier set CardTableModRefBS* _ct_bs; - // The virtual memory backing the counts table - VirtualSpace _card_counts_storage; - // Returns true if the card counts table has been reserved. bool has_reserved_count_table() { return _card_counts != NULL; } // Returns true if the card counts table has been reserved and committed. bool has_count_table() { - return has_reserved_count_table() && _committed_max_card_num > 0; + return has_reserved_count_table(); } size_t ptr_2_card_num(const jbyte* card_ptr) { @@ -79,37 +84,24 @@ "_ct_bot: " PTR_FORMAT, p2i(card_ptr), p2i(_ct_bot))); size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte)); - assert(card_num >= 0 && card_num < _committed_max_card_num, + assert(card_num >= 0 && card_num < _reserved_max_card_num, err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr))); return card_num; } jbyte* card_num_2_ptr(size_t card_num) { - assert(card_num >= 0 && card_num < _committed_max_card_num, + assert(card_num >= 0 && card_num < _reserved_max_card_num, err_msg("card num out of range: "SIZE_FORMAT, card_num)); return (jbyte*) (_ct_bot + card_num); } - // Helper routine. - // Returns the number of cards that can be counted by the given committed - // table size, with a maximum of the number of cards spanned by the max - // capacity of the heap. - size_t committed_to_card_num(size_t committed_size) { - return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte)); - } - // Clear the counts table for the given (exclusive) index range. void clear_range(size_t from_card_num, size_t to_card_num); public: G1CardCounts(G1CollectedHeap* g1h); - ~G1CardCounts(); - void initialize(); - - // Resize the committed space for the card counts table in - // response to a resize of the committed space for the heap. - void resize(size_t heap_capacity); + void initialize(G1RegionToSpaceMapper* mapper); // Increments the refinement count for the given card. // Returns the pre-increment count value. @@ -122,8 +114,10 @@ // Clears the card counts for the cards spanned by the region void clear_region(HeapRegion* hr); + // Clears the card counts for the cards spanned by the MemRegion + void clear_range(MemRegion mr); + // Clear the entire card counts table during GC. - // Updates the policy stats with the duration. 
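ptr_2_card_num and card_num_2_ptr above are inverse pointer arithmetic around the table origin _ct_bot. A self-contained version of the round trip, using plain types rather than the HotSpot declarations:

    #include <cassert>
    #include <cstddef>

    // Illustrative round trip between card pointers and card indices relative
    // to a table origin, mirroring the pointer_delta arithmetic above.
    struct CardIndexing {
      const signed char* ct_bot;   // first card table entry
      size_t max_cards;            // table size in entries

      size_t ptr_to_card_num(const signed char* card_ptr) const {
        assert(card_ptr >= ct_bot && "card pointer below table origin");
        size_t card_num = (size_t)(card_ptr - ct_bot);
        assert(card_num < max_cards && "card pointer out of range");
        return card_num;
      }

      const signed char* card_num_to_ptr(size_t card_num) const {
        assert(card_num < max_cards && "card num out of range");
        return ct_bot + card_num;
      }
    };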
   void clear_all();
 };
--- ./hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -22,298 +22,386 @@
  *
  */
-
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stack.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
-  _top = bottom();
+class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+  friend class G1CodeRootSetTest;
+  typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+  static CodeRootSetTable* volatile _purge_list;
+
+  CodeRootSetTable* _purge_next;
+
+  unsigned int compute_hash(nmethod* nm) {
+    uintptr_t hash = (uintptr_t)nm;
+    return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
+  }
+
+  void remove_entry(Entry* e, Entry* previous);
+  Entry* new_entry(nmethod* nm);
+
+ public:
+  CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+  ~CodeRootSetTable();
+
+  // Needs to be protected by locks
+  bool add(nmethod* nm);
+  bool remove(nmethod* nm);
+
+  // Can be called without locking
+  bool contains(nmethod* nm);
+
+  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+  void copy_to(CodeRootSetTable* new_table);
+  void nmethods_do(CodeBlobClosure* blk);
+
+  template<typename CB>
+  int remove_if(CB& should_remove);
+
+  static void purge_list_append(CodeRootSetTable* tbl);
+  static void purge();
+
+  static size_t static_mem_size() {
+    return sizeof(_purge_list);
+  }
+};
+
+CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+
+CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+  unsigned int hash = compute_hash(nm);
+  Entry* entry = (Entry*) new_entry_free_list();
+  if (entry == NULL) {
+    entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
+  }
+  entry->set_next(NULL);
+  entry->set_hash(hash);
+  entry->set_literal(nm);
+  return entry;
 }
 
-void G1CodeRootChunk::reset() {
-  _next = _prev = NULL;
-  _top = bottom();
+void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
+  int index = hash_to_index(e->hash());
+  assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
+
+  if (previous == NULL) {
+    set_entry(index, e->next());
+  } else {
+    previous->set_next(e->next());
+  }
+  free_entry(e);
 }
 
-void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
-  nmethod** cur = bottom();
-  while (cur != _top) {
-    cl->do_code_blob(*cur);
-    cur++;
+CodeRootSetTable::~CodeRootSetTable() {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; ) {
+      Entry* to_remove = e;
+      // read next before freeing.
+      e = e->next();
+      unlink_entry(to_remove);
+      FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
+    }
+  }
+  assert(number_of_entries() == 0, "should have removed all entries");
+  free_buckets();
+  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
+    FREE_C_HEAP_ARRAY(char, e, mtGC);
   }
 }
 
-FreeList<G1CodeRootChunk> G1CodeRootSet::_free_list;
-size_t G1CodeRootSet::_num_chunks_handed_out = 0;
-
-G1CodeRootChunk* G1CodeRootSet::new_chunk() {
-  G1CodeRootChunk* result = _free_list.get_chunk_at_head();
-  if (result == NULL) {
-    result = new G1CodeRootChunk();
+bool CodeRootSetTable::add(nmethod* nm) {
+  if (!contains(nm)) {
+    Entry* e = new_entry(nm);
+    int index = hash_to_index(e->hash());
+    add_entry(index, e);
+    return true;
   }
-  G1CodeRootSet::_num_chunks_handed_out++;
-  result->reset();
-  return result;
+  return false;
 }
 
-void G1CodeRootSet::free_chunk(G1CodeRootChunk* chunk) {
-  _free_list.return_chunk_at_head(chunk);
-  G1CodeRootSet::_num_chunks_handed_out--;
+bool CodeRootSetTable::contains(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->literal() == nm) {
+      return true;
+    }
+  }
+  return false;
 }
 
-void G1CodeRootSet::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
-  G1CodeRootSet::_num_chunks_handed_out -= list->count();
-  _free_list.prepend(list);
+bool CodeRootSetTable::remove(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  Entry* previous = NULL;
+  for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
+    if (e->literal() == nm) {
+      remove_entry(e, previous);
+      return true;
+    }
+  }
+  return false;
 }
 
-void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
-  size_t keep = G1CodeRootSet::_num_chunks_handed_out * keep_ratio / 100;
+void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      new_table->add(e->literal());
+    }
+  }
+  new_table->copy_freelist(this);
+}
 
-  if (keep >= (size_t)_free_list.count()) {
-    return;
-  }
-
-  FreeList<G1CodeRootChunk> temp;
-  temp.initialize();
-  temp.set_size(G1CodeRootChunk::word_size());
-
-  _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
-
-  G1CodeRootChunk* cur = temp.get_chunk_at_head();
-  while (cur != NULL) {
-    delete cur;
-    cur = temp.get_chunk_at_head();
+void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      blk->do_code_blob(e->literal());
+    }
   }
 }
 
-size_t G1CodeRootSet::static_mem_size() {
-  return sizeof(_free_list) + sizeof(_num_chunks_handed_out);
-}
-
-size_t G1CodeRootSet::fl_mem_size() {
-  return _free_list.count() * _free_list.size();
-}
-
-void G1CodeRootSet::initialize() {
-  _free_list.initialize();
-  _free_list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::G1CodeRootSet() : _list(), _length(0) {
-  _list.initialize();
-  _list.set_size(G1CodeRootChunk::word_size());
+template<typename CB>
+int CodeRootSetTable::remove_if(CB& should_remove) {
+  int num_removed = 0;
+  for (int index = 0; index < table_size(); ++index) {
+    Entry* previous = NULL;
+    Entry* e = bucket(index);
+    while (e != NULL) {
+      Entry* next = e->next();
+      if (should_remove(e->literal())) {
+        remove_entry(e, previous);
+        ++num_removed;
+      } else {
+        previous = e;
+      }
+      e = next;
+    }
+  }
+  return num_removed;
 }
 
 G1CodeRootSet::~G1CodeRootSet() {
-  clear();
+  delete _table;
+}
+
+CodeRootSetTable*
G1CodeRootSet::load_acquire_table() { + return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table); +} + +void G1CodeRootSet::allocate_small_table() { + _table = new CodeRootSetTable(SmallSize); +} + +void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) { + for (;;) { + table->_purge_next = _purge_list; + CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next); + if (old == table->_purge_next) { + break; + } + } +} + +void CodeRootSetTable::purge() { + CodeRootSetTable* table = _purge_list; + _purge_list = NULL; + while (table != NULL) { + CodeRootSetTable* to_purge = table; + table = table->_purge_next; + delete to_purge; + } +} + +void G1CodeRootSet::move_to_large() { + CodeRootSetTable* temp = new CodeRootSetTable(LargeSize); + + _table->copy_to(temp); + + CodeRootSetTable::purge_list_append(_table); + + OrderAccess::release_store_ptr(&_table, temp); +} + + +void G1CodeRootSet::purge() { + CodeRootSetTable::purge(); +} + +size_t G1CodeRootSet::static_mem_size() { + return CodeRootSetTable::static_mem_size(); } void G1CodeRootSet::add(nmethod* method) { - if (!contains(method)) { - // Try to add the nmethod. If there is not enough space, get a new chunk. - if (_list.head() == NULL || _list.head()->is_full()) { - G1CodeRootChunk* cur = new_chunk(); - _list.return_chunk_at_head(cur); - } - bool result = _list.head()->add(method); - guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method)); - _length++; + bool added = false; + if (is_empty()) { + allocate_small_table(); + } + added = _table->add(method); + if (_length == Threshold) { + move_to_large(); + } + if (added) { + ++_length; } } -void G1CodeRootSet::remove(nmethod* method) { - G1CodeRootChunk* found = find(method); - if (found != NULL) { - bool result = found->remove(method); - guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method)); - // eventually free completely emptied chunk - if (found->is_empty()) { - _list.remove_chunk(found); - free(found); +bool G1CodeRootSet::remove(nmethod* method) { + bool removed = false; + if (_table != NULL) { + removed = _table->remove(method); + } + if (removed) { + _length--; + if (_length == 0) { + clear(); } - _length--; } - assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method)); -} - -nmethod* G1CodeRootSet::pop() { - do { - G1CodeRootChunk* cur = _list.head(); - if (cur == NULL) { - assert(_length == 0, "when there are no chunks, there should be no elements"); - return NULL; - } - nmethod* result = cur->pop(); - if (result != NULL) { - _length--; - return result; - } else { - free(_list.get_chunk_at_head()); - } - } while (true); -} - -G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) { - G1CodeRootChunk* cur = _list.head(); - while (cur != NULL) { - if (cur->contains(method)) { - return cur; - } - cur = (G1CodeRootChunk*)cur->next(); - } - return NULL; -} - -void G1CodeRootSet::free(G1CodeRootChunk* chunk) { - free_chunk(chunk); + return removed; } bool G1CodeRootSet::contains(nmethod* method) { - return find(method) != NULL; + CodeRootSetTable* table = load_acquire_table(); + if (table != NULL) { + return table->contains(method); + } + return false; } void G1CodeRootSet::clear() { - free_all_chunks(&_list); + delete _table; + _table = NULL; _length = 0; } +size_t G1CodeRootSet::mem_size() { + return sizeof(*this) + + (_table != NULL ? 
sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0); +} + void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const { - G1CodeRootChunk* cur = _list.head(); - while (cur != NULL) { - cur->nmethods_do(blk); - cur = (G1CodeRootChunk*)cur->next(); + if (_table != NULL) { + _table->nmethods_do(blk); } } -size_t G1CodeRootSet::mem_size() { - return sizeof(this) + _list.count() * _list.size(); +class CleanCallback : public StackObj { + class PointsIntoHRDetectionClosure : public OopClosure { + HeapRegion* _hr; + public: + bool _points_into; + PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {} + + void do_oop(narrowOop* o) { + do_oop_work(o); + } + + void do_oop(oop* o) { + do_oop_work(o); + } + + template + void do_oop_work(T* p) { + if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) { + _points_into = true; + } + } + }; + + PointsIntoHRDetectionClosure _detector; + CodeBlobToOopClosure _blobs; + + public: + CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {} + + bool operator() (nmethod* nm) { + _detector._points_into = false; + _blobs.do_code_blob(nm); + return !_detector._points_into; + } +}; + +void G1CodeRootSet::clean(HeapRegion* owner) { + CleanCallback should_clean(owner); + if (_table != NULL) { + int removed = _table->remove_if(should_clean); + assert((size_t)removed <= _length, "impossible"); + _length -= removed; + } + if (_length == 0) { + clear(); + } } #ifndef PRODUCT -void G1CodeRootSet::test() { - initialize(); +class G1CodeRootSetTest { + public: + static void test() { + { + G1CodeRootSet set1; + assert(set1.is_empty(), "Code root set must be initially empty but is not."); - assert(_free_list.count() == 0, "Free List must be empty"); - assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet"); + assert(G1CodeRootSet::static_mem_size() == sizeof(void*), + err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size())); - // The number of chunks that we allocate for purge testing. - size_t const num_chunks = 10; - { - G1CodeRootSet set1; - assert(set1.is_empty(), "Code root set must be initially empty but is not."); + set1.add((nmethod*)1); + assert(set1.length() == 1, err_msg("Added exactly one element, but set contains " + SIZE_FORMAT" elements", set1.length())); - set1.add((nmethod*)1); - assert(_num_chunks_handed_out == 1, - err_msg("Must have allocated and handed out one chunk, but handed out " - SIZE_FORMAT" chunks", _num_chunks_handed_out)); - assert(set1.length() == 1, err_msg("Added exactly one element, but set contains " - SIZE_FORMAT" elements", set1.length())); + const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1; - // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which - // we cannot access. 
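The new CodeRootSetTable above replaces the chunked list with a chained hashtable keyed by nmethod pointer; compute_hash discards the low seven bits because code heap blocks are 128-byte aligned, so those bits never vary. A miniature of the same structure, without HotSpot's Hashtable machinery (fixed bucket count, standard allocation, invented names):

    #include <cstddef>
    #include <cstdint>

    // Simplified chained hash set of pointers, echoing CodeRootSetTable's
    // shape. Fixed bucket count; the real table grows via copy_to().
    class PtrHashSet {
      struct Entry { void* literal; Entry* next; };
      static const int kBuckets = 64;
      Entry* _buckets[kBuckets];

      static unsigned hash(void* p) {
        uintptr_t h = (uintptr_t)p;
        return (unsigned)(h ^ (h >> 7));   // low 7 bits are alignment padding
      }
      Entry*& bucket_for(void* p) { return _buckets[hash(p) % kBuckets]; }

     public:
      PtrHashSet() { for (int i = 0; i < kBuckets; i++) _buckets[i] = NULL; }
      ~PtrHashSet() {
        for (int i = 0; i < kBuckets; i++) {
          for (Entry* e = _buckets[i]; e != NULL; ) {
            Entry* dead = e;
            e = e->next;   // read next before freeing, as in the destructor above
            delete dead;
          }
        }
      }

      bool contains(void* p) {
        for (Entry* e = bucket_for(p); e != NULL; e = e->next) {
          if (e->literal == p) return true;
        }
        return false;
      }

      bool add(void* p) {
        if (contains(p)) return false;
        Entry* e = new Entry;
        e->literal = p;
        e->next = bucket_for(p);
        bucket_for(p) = e;           // prepend to the chain
        return true;
      }
    };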
- for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) { - set1.add((nmethod*)1); - } - assert(_num_chunks_handed_out == 1, - err_msg("Duplicate detection must have prevented allocation of further " - "chunks but contains "SIZE_FORMAT, _num_chunks_handed_out)); - assert(set1.length() == 1, - err_msg("Duplicate detection should not have increased the set size but " - "is "SIZE_FORMAT, set1.length())); + for (size_t i = 1; i <= num_to_add; i++) { + set1.add((nmethod*)1); + } + assert(set1.length() == 1, + err_msg("Duplicate detection should not have increased the set size but " + "is "SIZE_FORMAT, set1.length())); - size_t num_total_after_add = G1CodeRootChunk::word_size() + 1; - for (size_t i = 0; i < num_total_after_add - 1; i++) { - set1.add((nmethod*)(2 + i)); - } - assert(_num_chunks_handed_out > 1, - "After adding more code roots, more than one chunks should have been handed out"); - assert(set1.length() == num_total_after_add, - err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they " - "need to be in the set, but there are only "SIZE_FORMAT, - num_total_after_add, set1.length())); + for (size_t i = 2; i <= num_to_add; i++) { + set1.add((nmethod*)(uintptr_t)(i)); + } + assert(set1.length() == num_to_add, + err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they " + "need to be in the set, but there are only "SIZE_FORMAT, + num_to_add, set1.length())); - size_t num_popped = 0; - while (set1.pop() != NULL) { - num_popped++; - } - assert(num_popped == num_total_after_add, - err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" " - "were added", num_popped, num_total_after_add)); - assert(_num_chunks_handed_out == 0, - err_msg("After popping all elements, all chunks must have been returned " - "but are still "SIZE_FORMAT, _num_chunks_handed_out)); + assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable"); - purge_chunks(0); - assert(_free_list.count() == 0, - err_msg("After purging everything, the free list must be empty but still " - "contains "SIZE_FORMAT" chunks", _free_list.count())); + size_t num_popped = 0; + for (size_t i = 1; i <= num_to_add; i++) { + bool removed = set1.remove((nmethod*)i); + if (removed) { + num_popped += 1; + } else { + break; + } + } + assert(num_popped == num_to_add, + err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" " + "were added", num_popped, num_to_add)); + assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable"); - // Add some more handed out chunks. - size_t i = 0; - while (_num_chunks_handed_out < num_chunks) { - set1.add((nmethod*)i); - i++; + G1CodeRootSet::purge(); + + assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables"); + } - { - // Generate chunks on the free list. - G1CodeRootSet set2; - size_t i = 0; - while (_num_chunks_handed_out < num_chunks * 2) { - set2.add((nmethod*)i); - i++; - } - // Exit of the scope of the set2 object will call the destructor that generates - // num_chunks elements on the free list. 
- } + } +}; - assert(_num_chunks_handed_out == num_chunks, - err_msg("Deletion of the second set must have resulted in giving back " - "those, but there is still "SIZE_FORMAT" handed out, expecting " - SIZE_FORMAT, _num_chunks_handed_out, num_chunks)); - assert((size_t)_free_list.count() == num_chunks, - err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list " - "but there are only "SIZE_FORMAT, num_chunks, _free_list.count())); - - size_t const test_percentage = 50; - purge_chunks(test_percentage); - assert(_num_chunks_handed_out == num_chunks, - err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT, - _num_chunks_handed_out)); - assert((size_t)_free_list.count() == (ssize_t)(num_chunks * test_percentage / 100), - err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks" - "but there are "SSIZE_FORMAT, test_percentage, num_chunks, - _free_list.count())); - // Purge the remainder of the chunks on the free list. - purge_chunks(0); - assert(_free_list.count() == 0, "Free List must be empty"); - assert(_num_chunks_handed_out == num_chunks, - err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set " - "but there are "SIZE_FORMAT, num_chunks, _num_chunks_handed_out)); - - // Exit of the scope of the set1 object will call the destructor that generates - // num_chunks additional elements on the free list. - } - - assert(_num_chunks_handed_out == 0, - err_msg("Deletion of the only set must have resulted in no chunks handed " - "out, but there is still "SIZE_FORMAT" handed out", _num_chunks_handed_out)); - assert((size_t)_free_list.count() == num_chunks, - err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list " - "but there are only "SSIZE_FORMAT, num_chunks, _free_list.count())); - - // Restore initial state. 
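purge_list_append in the new code above is a lock-free prepend: point the new table's _purge_next at the observed head, then publish it with a compare-and-swap, retrying if another thread got there first. The same loop in portable C++11 atomics (illustrative; the real code uses HotSpot's Atomic::cmpxchg_ptr):

    #include <atomic>

    // Lock-free prepend onto a singly linked list, as in purge_list_append.
    struct Node {
      Node* next;
    };

    std::atomic<Node*> g_purge_list(nullptr);

    void purge_list_append(Node* n) {
      for (;;) {
        Node* head = g_purge_list.load(std::memory_order_relaxed);
        n->next = head;  // speculatively link to the observed head
        // Publish n as the new head; fails and retries if the head moved.
        if (g_purge_list.compare_exchange_weak(head, n,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) {
          break;
        }
      }
    }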
- purge_chunks(0); - assert(_free_list.count() == 0, "Free List must be empty"); - assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet"); +void TestCodeCacheRemSet_test() { + G1CodeRootSetTest::test(); } -void TestCodeCacheRemSet_test() { - G1CodeRootSet::test(); -} #endif --- ./hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,155 +26,57 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP #include "memory/allocation.hpp" -#include "memory/freeList.hpp" -#include "runtime/globals.hpp" class CodeBlobClosure; - -class G1CodeRootChunk : public CHeapObj { - private: - static const int NUM_ENTRIES = 32; - public: - G1CodeRootChunk* _next; - G1CodeRootChunk* _prev; - - nmethod** _top; - - nmethod* _data[NUM_ENTRIES]; - - nmethod** bottom() const { - return (nmethod**) &(_data[0]); - } - - nmethod** end() const { - return (nmethod**) &(_data[NUM_ENTRIES]); - } - - public: - G1CodeRootChunk(); - ~G1CodeRootChunk() {} - - static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); } - - // FreeList "interface" methods - - G1CodeRootChunk* next() const { return _next; } - G1CodeRootChunk* prev() const { return _prev; } - void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");} - void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");} - void clear_next() { set_next(NULL); } - void clear_prev() { set_prev(NULL); } - - size_t size() const { return word_size(); } - - void link_next(G1CodeRootChunk* ptr) { set_next(ptr); } - void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); } - void link_after(G1CodeRootChunk* ptr) { - link_next(ptr); - if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this); - } - - bool is_free() { return true; } - - // New G1CodeRootChunk routines - - void reset(); - - bool is_empty() const { - return _top == bottom(); - } - - bool is_full() const { - return _top == (nmethod**)end(); - } - - bool contains(nmethod* method) { - nmethod** cur = bottom(); - while (cur != _top) { - if (*cur == method) return true; - cur++; - } - return false; - } - - bool add(nmethod* method) { - if (is_full()) return false; - *_top = method; - _top++; - return true; - } - - bool remove(nmethod* method) { - nmethod** cur = bottom(); - while (cur != _top) { - if (*cur == method) { - memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**)); - _top--; - return true; - } - cur++; - } - return false; - } - - void nmethods_do(CodeBlobClosure* blk); - - nmethod* pop() { - if (is_empty()) { - return NULL; - } - _top--; - return *_top; - } -}; +class CodeRootSetTable; +class HeapRegion; +class nmethod; // Implements storage for a set of code roots. // All methods that modify the set are not thread-safe except if otherwise noted. class G1CodeRootSet VALUE_OBJ_CLASS_SPEC { + friend class G1CodeRootSetTest; private: - // Global free chunk list management - static FreeList _free_list; - // Total number of chunks handed out - static size_t _num_chunks_handed_out; - static G1CodeRootChunk* new_chunk(); - static void free_chunk(G1CodeRootChunk* chunk); - // Free all elements of the given list. - static void free_all_chunks(FreeList* list); + const static size_t SmallSize = 32; + const static size_t Threshold = 24; + const static size_t LargeSize = 512; - // Return the chunk that contains the given nmethod, NULL otherwise. 
- // Scans the list of chunks backwards, as this method is used to add new - // entries, which are typically added in bulk for a single nmethod. - G1CodeRootChunk* find(nmethod* method); - void free(G1CodeRootChunk* chunk); + CodeRootSetTable* _table; + CodeRootSetTable* load_acquire_table(); size_t _length; - FreeList _list; + + void move_to_large(); + void allocate_small_table(); public: - G1CodeRootSet(); + G1CodeRootSet() : _table(NULL), _length(0) {} ~G1CodeRootSet(); - static void initialize(); - static void purge_chunks(size_t keep_ratio); + static void purge(); static size_t static_mem_size(); - static size_t fl_mem_size(); - // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this - // method is likely to be repeatedly called with the same nmethod. void add(nmethod* method); - void remove(nmethod* method); - nmethod* pop(); + bool remove(nmethod* method); + // Safe to call without synchronization, but may return false negatives. bool contains(nmethod* method); void clear(); void nmethods_do(CodeBlobClosure* blk) const; - bool is_empty() { return length() == 0; } + // Remove all nmethods which no longer contain pointers into our "owner" region + void clean(HeapRegion* owner); + + bool is_empty() { + bool empty = length() == 0; + assert(empty == (_table == NULL), "is empty only if table is deallocated"); + return empty; + } // Length in elements size_t length() const { return _length; } @@ -182,7 +84,6 @@ // Memory size in bytes taken by this set. size_t mem_size(); - static void test() PRODUCT_RETURN; }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,6 +27,7 @@ #endif #include "precompiled.hpp" +#include "classfile/metadataOnStackMark.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "gc_implementation/g1/bufferingOopClosure.hpp" @@ -42,26 +43,29 @@ #include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" +#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/g1StringDedup.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" +#include "memory/allocation.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/generationSpec.hpp" #include "memory/iterator.hpp" #include "memory/referenceProcessor.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" -#include "utilities/ticks.hpp" size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; @@ -86,66 +90,64 @@ // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. 
// The number of GC workers is passed to heap_region_par_iterate_chunked(). // It does use run_task() which sets _n_workers in the task. -// G1ParTask executes g1_process_strong_roots() -> -// SharedHeap::process_strong_roots() which calls eventually to +// G1ParTask executes g1_process_roots() -> +// SharedHeap::process_roots() which calls eventually to // CardTableModRefBS::par_non_clean_card_iterate_work() which uses -// SequentialSubTasksDone. SharedHeap::process_strong_roots() also +// SequentialSubTasksDone. SharedHeap::process_roots() also // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). // // Local to this file. class RefineCardTableEntryClosure: public CardTableEntryClosure { - SuspendibleThreadSet* _sts; - G1RemSet* _g1rs; - ConcurrentG1Refine* _cg1r; bool _concurrent; public: - RefineCardTableEntryClosure(SuspendibleThreadSet* sts, - G1RemSet* g1rs, - ConcurrentG1Refine* cg1r) : - _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) - {} + RefineCardTableEntryClosure() : _concurrent(true) { } + bool do_card_ptr(jbyte* card_ptr, uint worker_i) { - bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false); + bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false); // This path is executed by the concurrent refine or mutator threads, // concurrently, and so we do not care if card_ptr contains references // that point into the collection set. assert(!oops_into_cset, "should be"); - if (_concurrent && _sts->should_yield()) { + if (_concurrent && SuspendibleThreadSet::should_yield()) { // Caller will actually yield. return false; } // Otherwise, we finished successfully; return true. return true; } + void set_concurrent(bool b) { _concurrent = b; } }; class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { - int _calls; - G1CollectedHeap* _g1h; + size_t _num_processed; CardTableModRefBS* _ctbs; int _histo[256]; -public: + + public: ClearLoggedCardTableEntryClosure() : - _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) + _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set()) { for (int i = 0; i < 256; i++) _histo[i] = 0; } + bool do_card_ptr(jbyte* card_ptr, uint worker_i) { - if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { - _calls++; - unsigned char* ujb = (unsigned char*)card_ptr; - int ind = (int)(*ujb); - _histo[ind]++; - *card_ptr = -1; - } + unsigned char* ujb = (unsigned char*)card_ptr; + int ind = (int)(*ujb); + _histo[ind]++; + + *card_ptr = (jbyte)CardTableModRefBS::clean_card_val(); + _num_processed++; + return true; } - int calls() { return _calls; } + + size_t num_processed() { return _num_processed; } + void print_histo() { gclog_or_tty->print_cr("Card table value histogram:"); for (int i = 0; i < 256; i++) { @@ -156,22 +158,20 @@ } }; -class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { - int _calls; - G1CollectedHeap* _g1h; - CardTableModRefBS* _ctbs; -public: - RedirtyLoggedCardTableEntryClosure() : - _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {} +class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure { + private: + size_t _num_processed; + + public: + RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { } bool do_card_ptr(jbyte* card_ptr, uint worker_i) { - if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { - _calls++; - *card_ptr = 0; - } + *card_ptr = CardTableModRefBS::dirty_card_val(); + _num_processed++; return true; } - int 
calls() { return _calls; } + + size_t num_processed() const { return _num_processed; } }; YoungList::YoungList(G1CollectedHeap* g1h) : @@ -208,7 +208,10 @@ HeapRegion* next = list->get_next_young_region(); list->set_next_young_region(NULL); list->uninstall_surv_rate_group(); - list->set_not_young(); + // This is called before a Full GC and all the non-empty / + // non-humongous regions at the end of the Full GC will end up as + // old anyway. + list->set_old(); list = next; } } @@ -367,7 +370,7 @@ if (curr == NULL) gclog_or_tty->print_cr(" empty"); while (curr != NULL) { - gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d", + gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d", HR_FORMAT_PARAMS(curr), curr->prev_top_at_mark_start(), curr->next_top_at_mark_start(), @@ -379,6 +382,16 @@ gclog_or_tty->cr(); } +void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) { + OtherRegionsTable::invalidate(start_idx, num_regions); +} + +void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) { + // The from card cache is not the memory that is actually committed. So we cannot + // take advantage of the zero_filled parameter. + reset_from_card_cache(start_idx, num_regions); +} + void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) { // Claim the right to put the region on the dirty cards region list @@ -444,24 +457,18 @@ // implementation of is_scavengable() for G1 will indicate that // all nmethods must be scanned during a partial collection. bool G1CollectedHeap::is_in_partial_collection(const void* p) { - HeapRegion* hr = heap_region_containing(p); - return hr != NULL && hr->in_collection_set(); + if (p == NULL) { + return false; + } + return heap_region_containing(p)->in_collection_set(); } #endif // Returns true if the reference points to an object that // can move in an incremental collection. bool G1CollectedHeap::is_scavengable(const void* p) { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - G1CollectorPolicy* g1p = g1h->g1_policy(); HeapRegion* hr = heap_region_containing(p); - if (hr == NULL) { - // null - assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p)); - return false; - } else { - return !hr->isHumongous(); - } + return !hr->isHumongous(); } void G1CollectedHeap::check_ct_logs_at_safepoint() { @@ -475,9 +482,8 @@ // First clear the logged cards. ClearLoggedCardTableEntryClosure clear; - dcqs.set_closure(&clear); - dcqs.apply_closure_to_all_completed_buffers(); - dcqs.iterate_closure_all_threads(false); + dcqs.apply_closure_to_all_completed_buffers(&clear); + dcqs.iterate_closure_all_threads(&clear, false); clear.print_histo(); // Now ensure that there's no dirty cards. 
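[Editor's note; the following is not part of the patch.] The change above replaces the old protocol of installing a closure on the DirtyCardQueueSet with set_closure() by closures that are passed explicitly to each call, and the counting closures now report a size_t via num_processed() instead of an int via calls(). A minimal, self-contained C++ sketch of the new pattern; every name here is an illustrative stand-in, not HotSpot's:

#include <cstddef>
#include <cstdio>
#include <vector>

typedef signed char jbyte; // HotSpot's jbyte is likewise a signed byte

struct CardTableEntryClosureSketch {
  // Returning false asks the caller to stop/yield, mirroring do_card_ptr().
  virtual bool do_card_ptr(jbyte* card_ptr, unsigned worker_i) = 0;
  virtual ~CardTableEntryClosureSketch() {}
};

class CountingClosure : public CardTableEntryClosureSketch {
  size_t _num_processed;
 public:
  CountingClosure() : _num_processed(0) {}
  bool do_card_ptr(jbyte* card_ptr, unsigned /* worker_i */) {
    *card_ptr = 0;     // "clean" the card
    _num_processed++;
    return true;       // this sketch never yields
  }
  size_t num_processed() const { return _num_processed; }
};

class DirtyCardQueueSetSketch {
  std::vector<jbyte> _cards;
 public:
  explicit DirtyCardQueueSetSketch(size_t n) : _cards(n, 1) {}
  // The closure is a per-call argument, as in the patched code above,
  // rather than state installed once with set_closure().
  void apply_closure_to_all_completed_buffers(CardTableEntryClosureSketch* cl) {
    for (size_t i = 0; i < _cards.size(); i++) {
      if (!cl->do_card_ptr(&_cards[i], 0 /* worker id */)) {
        return;
      }
    }
  }
};

int main() {
  DirtyCardQueueSetSketch dcqs(256);
  CountingClosure clear;
  dcqs.apply_closure_to_all_completed_buffers(&clear);
  printf("processed %zu cards\n", clear.num_processed());
  return 0;
}

The per-call form makes it impossible for two users (for instance the clear and redirty passes above) to race on a single installed closure.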
@@ -490,13 +496,13 @@ guarantee(count2.n() == 0, "Card table should be clean."); RedirtyLoggedCardTableEntryClosure redirty; - JavaThread::dirty_card_queue_set().set_closure(&redirty); - dcqs.apply_closure_to_all_completed_buffers(); - dcqs.iterate_closure_all_threads(false); + dcqs.apply_closure_to_all_completed_buffers(&redirty); + dcqs.iterate_closure_all_threads(&redirty, false); gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", - clear.calls(), orig_count); - guarantee(redirty.calls() == clear.calls(), - "Or else mechanism is broken."); + clear.num_processed(), orig_count); + guarantee(redirty.num_processed() == clear.num_processed(), + err_msg("Redirtied "SIZE_FORMAT" cards, but cleared "SIZE_FORMAT, + redirty.num_processed(), clear.num_processed())); CountNonCleanMemRegionClosure count3(this); ct_bs->mod_card_iterate(&count3); @@ -505,8 +511,6 @@ orig_count, count3.n()); guarantee(count3.n() >= orig_count, "Should have restored them all."); } - - JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); } // Private class members. @@ -530,9 +534,9 @@ // again to allocate from it. append_secondary_free_list(); - assert(!_free_list.is_empty(), "if the secondary_free_list was not " + assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not " "empty we should have moved at least one entry to the free_list"); - HeapRegion* res = _free_list.remove_region(is_old); + HeapRegion* res = _hrm.allocate_free_region(is_old); if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "allocated "HR_FORMAT" from secondary_free_list", @@ -573,7 +577,7 @@ } } - res = _free_list.remove_region(is_old); + res = _hrm.allocate_free_region(is_old); if (res == NULL) { if (G1ConcRegionFreeingVerbose) { @@ -598,8 +602,8 @@ // Given that expand() succeeded in expanding the heap, and we // always expand the heap by an amount aligned to the heap // region size, the free list should in theory not be empty. - // In either case remove_region() will check for NULL. - res = _free_list.remove_region(is_old); + // In either case allocate_free_region() will check for NULL. + res = _hrm.allocate_free_region(is_old); } else { _expand_heap_after_alloc_failure = false; } @@ -607,55 +611,12 @@ return res; } -uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions, - size_t word_size) { - assert(isHumongous(word_size), "word_size should be humongous"); - assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); - - uint first = G1_NULL_HRS_INDEX; - if (num_regions == 1) { - // Only one region to allocate, no need to go through the slower - // path. The caller will attempt the expansion if this fails, so - // let's not try to expand here too. - HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); - if (hr != NULL) { - first = hr->hrs_index(); - } else { - first = G1_NULL_HRS_INDEX; - } - } else { - // We can't allocate humongous regions while cleanupComplete() is - // running, since some of the regions we find to be empty might not - // yet be added to the free list and it is not straightforward to - // know which list they are on so that we can remove them. Note - // that we only need to do this if we need to allocate more than - // one region to satisfy the current humongous allocation - // request. If we are only allocating one region we use the common - // region allocation code (see above). 
- wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty_with_lock(); - - if (free_regions() >= num_regions) { - first = _hrs.find_contiguous(num_regions); - if (first != G1_NULL_HRS_INDEX) { - for (uint i = first; i < first + num_regions; ++i) { - HeapRegion* hr = region_at(i); - assert(hr->is_empty(), "sanity"); - assert(is_on_master_free_list(hr), "sanity"); - hr->set_pending_removal(true); - } - _free_list.remove_all_pending(num_regions); - } - } - } - return first; -} - HeapWord* G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, uint num_regions, - size_t word_size) { - assert(first != G1_NULL_HRS_INDEX, "pre-condition"); + size_t word_size, + AllocationContext_t context) { + assert(first != G1_NO_HRM_INDEX, "pre-condition"); assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); @@ -706,13 +667,14 @@ // that there is a single object that starts at the bottom of the // first region. first_hr->set_startsHumongous(new_top, new_end); - + first_hr->set_allocation_context(context); // Then, if there are any, we will set up the "continues // humongous" regions. HeapRegion* hr = NULL; for (uint i = first + 1; i < last; ++i) { hr = region_at(i); hr->set_continuesHumongous(first_hr); + hr->set_allocation_context(context); } // If we have "continues humongous" regions (hr != NULL), then the // end of the last one should match new_end. @@ -776,9 +738,10 @@ // match new_top. assert(hr == NULL || (hr->end() == new_end && hr->top() == new_top), "sanity"); + check_bitmaps("Humongous Region Allocation", first_hr); assert(first_hr->used() == word_size * HeapWordSize, "invariant"); - _summary_bytes_used += first_hr->used(); + _allocator->increase_used(first_hr->used()); _humongous_set.add(first_hr); return new_obj; @@ -787,47 +750,77 @@ // If could fit into free regions w/o expansion, try. // Otherwise, if can expand, do so. // Otherwise, if using ex regions might help, try with ex given back. -HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { +HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); verify_region_sets_optional(); - size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords); - uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords); - uint x_num = expansion_regions(); - uint fs = _hrs.free_suffix(); - uint first = humongous_obj_allocate_find_first(num_regions, word_size); - if (first == G1_NULL_HRS_INDEX) { - // The only thing we can do now is attempt expansion. - if (fs + x_num >= num_regions) { - // If the number of regions we're trying to allocate for this - // object is at most the number of regions in the free suffix, - // then the call to humongous_obj_allocate_find_first() above - // should have succeeded and we wouldn't be here. - // - // We should only be trying to expand when the free suffix is - // not sufficient for the object _and_ we have some expansion - // room available. - assert(num_regions > fs, "earlier allocation should have succeeded"); - + uint first = G1_NO_HRM_INDEX; + uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords); + + if (obj_regions == 1) { + // Only one region to allocate, try to use a fast path by directly allocating + // from the free lists. Do not try to expand here, we will potentially do that + // later. 
+ HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); + if (hr != NULL) { + first = hr->hrm_index(); + } + } else { + // We can't allocate humongous regions spanning more than one region while + // cleanupComplete() is running, since some of the regions we find to be + // empty might not yet be added to the free list. It is not straightforward + // to know which list they are on so that we can remove them. We only + // need to do this if we need to allocate more than one region to satisfy the + // current humongous allocation request. If we are only allocating one region + // we use the one-region allocation code (see above), which already + // potentially waits for regions from the secondary free list. + wait_while_free_regions_coming(); + append_secondary_free_list_if_not_empty_with_lock(); + + // Policy: Try only empty regions (i.e. already committed) first. Maybe we + // are lucky enough to find some. + first = _hrm.find_contiguous_only_empty(obj_regions); + if (first != G1_NO_HRM_INDEX) { + _hrm.allocate_free_regions_starting_at(first, obj_regions); + } + } + + if (first == G1_NO_HRM_INDEX) { + // Policy: We could not find enough regions for the humongous object in the + // free list. Look through the heap to find a mix of free and uncommitted regions. + // If found, try expansion. + first = _hrm.find_contiguous_empty_or_unavailable(obj_regions); + if (first != G1_NO_HRM_INDEX) { + // We found something. Make sure these regions are committed, i.e. expand + // the heap. Alternatively we could do a defragmentation GC. ergo_verbose1(ErgoHeapSizing, "attempt heap expansion", ergo_format_reason("humongous allocation request failed") ergo_format_byte("allocation request"), word_size * HeapWordSize); - if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { - // Even though the heap was expanded, it might not have - // reached the desired size. So, we cannot assume that the - // allocation will succeed. - first = humongous_obj_allocate_find_first(num_regions, word_size); + + _hrm.expand_at(first, obj_regions); + g1_policy()->record_new_heap_size(num_regions()); + +#ifdef ASSERT + for (uint i = first; i < first + obj_regions; ++i) { + HeapRegion* hr = region_at(i); + assert(hr->is_free(), "sanity"); + assert(hr->is_empty(), "sanity"); + assert(is_on_master_free_list(hr), "sanity"); } +#endif + _hrm.allocate_free_regions_starting_at(first, obj_regions); + } else { + // Policy: Potentially trigger a defragmentation GC. } } HeapWord* result = NULL; - if (first != G1_NULL_HRS_INDEX) { - result = - humongous_obj_allocate_initialize_regions(first, num_regions, word_size); + if (first != G1_NO_HRM_INDEX) { + result = humongous_obj_allocate_initialize_regions(first, obj_regions, + word_size, context); assert(result != NULL, "it should always return a valid result"); // A successful humongous object allocation changes the used space @@ -871,6 +864,8 @@ // Create the garbage collection operation... VM_G1CollectForAllocation op(gc_count_before, word_size); + op.set_allocation_context(AllocationContext::current()); + // ...and get the VM thread to execute it. VMThread::execute(&op); @@ -906,8 +901,9 @@ } HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, - unsigned int *gc_count_before_ret, - int* gclocker_retry_count_ret) { + AllocationContext_t context, + unsigned int *gc_count_before_ret, + int* gclocker_retry_count_ret) { // Make sure you read the note in attempt_allocation_humongous(). 
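// [Editor's sketch, not part of the patch: for orientation, the slow path
// below loops roughly as follows --
//   for (;;) {
//     { MutexLockerEx x(Heap_lock);
//       result = attempt_allocation_locked(word_size);  // try under the lock
//       if (result != NULL) return result;
//       if (GC_locker::is_active_and_needs_gc())
//         maybe attempt_allocation_force(word_size);    // expand young list
//       gc_count_before = total_collections();          // for the VM op
//     }
//     either schedule VM_G1CollectForAllocation or stall until the GC
//     locker's collection has run, then retry attempt_allocation() once
//     without the lock before looping back;
//   }
// -- all identifiers here refer to the real code that follows.]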
assert_heap_not_locked_and_not_at_safepoint(); @@ -928,23 +924,22 @@ { MutexLockerEx x(Heap_lock); - - result = _mutator_alloc_region.attempt_allocation_locked(word_size, - false /* bot_updates */); + result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size, + false /* bot_updates */); if (result != NULL) { return result; } // If we reach here, attempt_allocation_locked() above failed to // allocate a new region. So the mutator alloc region should be NULL. - assert(_mutator_alloc_region.get() == NULL, "only way to get here"); + assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here"); if (GC_locker::is_active_and_needs_gc()) { if (g1_policy()->can_expand_young_list()) { // No need for an ergo verbose message here, // can_expand_young_list() does this when it returns true. - result = _mutator_alloc_region.attempt_allocation_force(word_size, - false /* bot_updates */); + result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size, + false /* bot_updates */); if (result != NULL) { return result; } @@ -1004,8 +999,8 @@ // first attempt (without holding the Heap_lock) here and the // follow-on attempt will be at the start of the next loop // iteration (after taking the Heap_lock). - result = _mutator_alloc_region.attempt_allocation(word_size, - false /* bot_updates */); + result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size, + false /* bot_updates */); if (result != NULL) { return result; } @@ -1023,8 +1018,8 @@ } HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, - unsigned int * gc_count_before_ret, - int* gclocker_retry_count_ret) { + unsigned int * gc_count_before_ret, + int* gclocker_retry_count_ret) { // The structure of this method has a lot of similarities to // attempt_allocation_slow(). The reason these two were not merged // into a single one is that such a method would require several "if @@ -1065,7 +1060,7 @@ // Given that humongous objects are not allocated in young // regions, we'll first try to do the allocation without doing a // collection hoping that there's enough space in the heap. - result = humongous_obj_allocate(word_size); + result = humongous_obj_allocate(word_size, AllocationContext::current()); if (result != NULL) { return result; } @@ -1141,17 +1136,18 @@ } HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_mutator_alloc_region) { + AllocationContext_t context, + bool expect_null_mutator_alloc_region) { assert_at_safepoint(true /* should_be_vm_thread */); - assert(_mutator_alloc_region.get() == NULL || + assert(_allocator->mutator_alloc_region(context)->get() == NULL || !expect_null_mutator_alloc_region, "the current alloc region was unexpectedly found to be non-NULL"); if (!isHumongous(word_size)) { - return _mutator_alloc_region.attempt_allocation_locked(word_size, + return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */); } else { - HeapWord* result = humongous_obj_allocate(word_size); + HeapWord* result = humongous_obj_allocate(word_size, context); if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) { g1_policy()->set_initiate_conc_mark_if_possible(); } @@ -1238,21 +1234,21 @@ public: bool doHeapRegion(HeapRegion* hr) { assert(!hr->is_young(), "not expecting to find young regions"); - // We only generate output for non-empty regions. 
- if (!hr->is_empty()) { - if (!hr->isHumongous()) { - _hr_printer->post_compaction(hr, G1HRPrinter::Old); - } else if (hr->startsHumongous()) { - if (hr->region_num() == 1) { - // single humongous region - _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous); - } else { - _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); - } + if (hr->is_free()) { + // We only generate output for non-empty regions. + } else if (hr->startsHumongous()) { + if (hr->region_num() == 1) { + // single humongous region + _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous); } else { - assert(hr->continuesHumongous(), "only way to get here"); - _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous); + _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); } + } else if (hr->continuesHumongous()) { + _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous); + } else if (hr->is_old()) { + _hr_printer->post_compaction(hr, G1HRPrinter::Old); + } else { + ShouldNotReachHere(); } return false; } @@ -1261,7 +1257,7 @@ : _hr_printer(hr_printer) { } }; -void G1CollectedHeap::print_hrs_post_compaction() { +void G1CollectedHeap::print_hrm_post_compaction() { PostCompactionPrinterClosure cl(hr_printer()); heap_region_iterate(&cl); } @@ -1305,7 +1301,7 @@ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); { - GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); + GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id()); TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); @@ -1334,6 +1330,7 @@ verify_before_gc(); + check_bitmaps("Full GC Start"); pre_full_gc_dump(gc_timer); COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -1350,8 +1347,8 @@ concurrent_mark()->abort(); // Make sure we'll choose a new allocation region afterwards. - release_mutator_alloc_region(); - abandon_gc_alloc_regions(); + _allocator->release_mutator_alloc_region(); + _allocator->abandon_gc_alloc_regions(); g1_rem_set()->cleanupHRRS(); // We should call this after we retire any currently active alloc @@ -1389,7 +1386,7 @@ G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); } - assert(free_regions() == 0, "we should not have added any free regions"); + assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); // Enqueue any discovered reference objects that have @@ -1429,7 +1426,7 @@ // that all the COMMIT / UNCOMMIT events are generated before // the end GC event. - print_hrs_post_compaction(); + print_hrm_post_compaction(); _hr_printer.end_gc(true /* full */, (size_t) total_collections()); } @@ -1489,9 +1486,7 @@ // Discard all rset updates JavaThread::dirty_card_queue_set().abandon_logs(); - assert(!G1DeferredRSUpdate - || (G1DeferredRSUpdate && - (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); + assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty"); _young_list->reset_sampled_info(); // At this point there should be no regions in the @@ -1502,21 +1497,30 @@ // Update the number of full collections that have been completed. increment_old_marking_cycles_completed(false /* concurrent */); - _hrs.verify_optional(); + _hrm.verify_optional(); verify_region_sets_optional(); verify_after_gc(); + // Clear the previous marking bitmap, if needed for bitmap verification. 
+ // Note we cannot do this when we clear the next marking bitmap in + // ConcurrentMark::abort() above since VerifyDuringGC verifies the + // objects marked during a full GC against the previous bitmap. + // But we need to clear it before calling check_bitmaps below since + // the full GC has compacted objects and updated TAMS but not updated + // the prev bitmap. + if (G1VerifyBitmaps) { + ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll(); + } + check_bitmaps("Full GC End"); + // Start a new incremental collection set for the next pause assert(g1_policy()->collection_set() == NULL, "must be"); g1_policy()->start_incremental_cset_building(); - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the next - // evacuation pause. clear_cset_fast_test(); - init_mutator_alloc_region(); + _allocator->init_mutator_alloc_region(); double end = os::elapsedTime(); g1_policy()->record_full_collection_end(); @@ -1652,6 +1656,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, + AllocationContext_t context, bool* succeeded) { assert_at_safepoint(true /* should_be_vm_thread */); @@ -1659,7 +1664,8 @@ // Let's attempt the allocation first. HeapWord* result = attempt_allocation_at_safepoint(word_size, - false /* expect_null_mutator_alloc_region */); + context, + false /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1669,7 +1675,7 @@ // incremental pauses. Therefore, at least for now, we'll favor // expansion over collection. (This might change in the future if we can // do something smarter than full collection to satisfy a failed alloc.) - result = expand_and_allocate(word_size); + result = expand_and_allocate(word_size, context); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1686,7 +1692,8 @@ // Retry the allocation result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_mutator_alloc_region */); + context, + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1703,7 +1710,8 @@ // Retry the allocation once more result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_mutator_alloc_region */); + context, + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1725,7 +1733,7 @@ // successful, perform the allocation and return the address of the // allocated block, or else "NULL". -HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { +HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) { assert_at_safepoint(true /* should_be_vm_thread */); verify_region_sets_optional(); @@ -1737,29 +1745,15 @@ ergo_format_byte("allocation request"), word_size * HeapWordSize); if (expand(expand_bytes)) { - _hrs.verify_optional(); + _hrm.verify_optional(); verify_region_sets_optional(); return attempt_allocation_at_safepoint(word_size, - false /* expect_null_mutator_alloc_region */); + context, + false /* expect_null_mutator_alloc_region */); } return NULL; } -void G1CollectedHeap::update_committed_space(HeapWord* old_end, - HeapWord* new_end) { - assert(old_end != new_end, "don't call this otherwise"); - assert((HeapWord*) _g1_storage.high() == new_end, "invariant"); - - // Update the committed mem region. - _g1_committed.set_end(new_end); - // Tell the card table about the update. 
- Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); - // Tell the BOT about the update. - _bot_shared->resize(_g1_committed.word_size()); - // Tell the hot card cache about the update - _cg1r->hot_card_cache()->resize_card_counts(capacity()); -} - bool G1CollectedHeap::expand(size_t expand_bytes) { size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_size_up(aligned_expand_bytes, @@ -1770,55 +1764,22 @@ ergo_format_byte("attempted expansion amount"), expand_bytes, aligned_expand_bytes); - if (_g1_storage.uncommitted_size() == 0) { + if (is_maximal_no_gc()) { ergo_verbose0(ErgoHeapSizing, "did not expand the heap", ergo_format_reason("heap already fully expanded")); return false; } - // First commit the memory. - HeapWord* old_end = (HeapWord*) _g1_storage.high(); - bool successful = _g1_storage.expand_by(aligned_expand_bytes); - if (successful) { - // Then propagate this update to the necessary data structures. - HeapWord* new_end = (HeapWord*) _g1_storage.high(); - update_committed_space(old_end, new_end); - - FreeRegionList expansion_list("Local Expansion List"); - MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list); - assert(mr.start() == old_end, "post-condition"); - // mr might be a smaller region than what was requested if - // expand_by() was unable to allocate the HeapRegion instances - assert(mr.end() <= new_end, "post-condition"); - - size_t actual_expand_bytes = mr.byte_size(); + uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes); + assert(regions_to_expand > 0, "Must expand by at least one region"); + + uint expanded_by = _hrm.expand_by(regions_to_expand); + + if (expanded_by > 0) { + size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes; assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); - assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), - "post-condition"); - if (actual_expand_bytes < aligned_expand_bytes) { - // We could not expand _hrs to the desired size. In this case we - // need to shrink the committed space accordingly. - assert(mr.end() < new_end, "invariant"); - - size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes; - // First uncommit the memory. - _g1_storage.shrink_by(diff_bytes); - // Then propagate this update to the necessary data structures. - update_committed_space(new_end, mr.end()); - } - _free_list.add_as_tail(&expansion_list); - - if (_hr_printer.is_active()) { - HeapWord* curr = mr.start(); - while (curr < mr.end()) { - HeapWord* curr_end = curr + HeapRegion::GrainWords; - _hr_printer.commit(curr, curr_end); - curr = curr_end; - } - assert(curr == mr.end(), "post-condition"); - } - g1_policy()->record_new_heap_size(n_regions()); + g1_policy()->record_new_heap_size(num_regions()); } else { ergo_verbose0(ErgoHeapSizing, "did not expand the heap", @@ -1826,12 +1787,12 @@ // The expansion of the virtual storage space was unsuccessful. // Let's see if it was because we ran out of swap. if (G1ExitOnExpansionFailure && - _g1_storage.uncommitted_size() >= aligned_expand_bytes) { + _hrm.available() >= regions_to_expand) { // We had head room... 
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); } } - return successful; + return regions_to_expand > 0; } void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { @@ -1841,8 +1802,7 @@ HeapRegion::GrainBytes); uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); - uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); - HeapWord* old_end = (HeapWord*) _g1_storage.high(); + uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove); size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; ergo_verbose3(ErgoHeapSizing, @@ -1852,22 +1812,7 @@ ergo_format_byte("attempted shrinking amount"), shrink_bytes, aligned_shrink_bytes, shrunk_bytes); if (num_regions_removed > 0) { - _g1_storage.shrink_by(shrunk_bytes); - HeapWord* new_end = (HeapWord*) _g1_storage.high(); - - if (_hr_printer.is_active()) { - HeapWord* curr = old_end; - while (curr > new_end) { - HeapWord* curr_end = curr; - curr -= HeapRegion::GrainWords; - _hr_printer.uncommit(curr, curr_end); - } - } - - _expansion_regions += num_regions_removed; - update_committed_space(old_end, new_end); - HeapRegionRemSet::shrink_heap(n_regions()); - g1_policy()->record_new_heap_size(n_regions()); + g1_policy()->record_new_heap_size(num_regions()); } else { ergo_verbose0(ErgoHeapSizing, "did not shrink the heap", @@ -1881,7 +1826,7 @@ // We should only reach here at the end of a Full GC which means we // should not not be holding to any GC alloc regions. The method // below will make sure of that and do any remaining clean up. - abandon_gc_alloc_regions(); + _allocator->abandon_gc_alloc_regions(); // Instead of tearing down / rebuilding the free lists here, we // could instead use the remove_all_pending() method on free_list to @@ -1890,7 +1835,7 @@ shrink_helper(shrink_bytes); rebuild_region_sets(true /* free_list_only */); - _hrs.verify_optional(); + _hrm.verify_optional(); verify_region_sets_optional(); } @@ -1914,18 +1859,18 @@ _bot_shared(NULL), _evac_failure_scan_stack(NULL), _mark_in_progress(false), - _cg1r(NULL), _summary_bytes_used(0), + _cg1r(NULL), _g1mm(NULL), _refine_cte_cl(NULL), _full_collection(false), - _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()), _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), + _humongous_is_live(), + _has_humongous_reclaim_candidates(false), _free_regions_coming(false), _young_list(new YoungList(this)), _gc_time_stamp(0), - _retained_old_gc_alloc_region(NULL), _survivor_plab_stats(YoungPLABSize, PLABWeight), _old_plab_stats(OldPLABSize, PLABWeight), _expand_heap_after_alloc_failure(true), @@ -1933,8 +1878,7 @@ _old_marking_cycles_started(0), _old_marking_cycles_completed(0), _concurrent_cycle_started(false), - _in_cset_fast_test(NULL), - _in_cset_fast_test_base(NULL), + _in_cset_fast_test(), _dirty_cards_region_list(NULL), _worker_cset_start_region(NULL), _worker_cset_start_region_time_stamp(NULL), @@ -1948,6 +1892,7 @@ vm_exit_during_initialization("Failed necessary allocation."); } + _allocator = G1Allocator::create_allocator(_g1h); _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; int n_queues = MAX2((int)ParallelGCThreads, 1); @@ -2004,7 +1949,9 @@ Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 
Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap"); - _cg1r = new ConcurrentG1Refine(this); + _refine_cte_cl = new RefineCardTableEntryClosure(); + + _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl); // Reserve the maximum. @@ -2029,8 +1976,6 @@ _reserved.set_start((HeapWord*)heap_rs.base()); _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); - _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes); - // Create the gen rem set (and barrier set) for the entire reserved region. _rem_set = collector_policy()->create_rem_set(_reserved, 2); set_barrier_set(rem_set()->bs()); @@ -2044,20 +1989,65 @@ // Carve out the G1 part of the heap. - ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); - _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), - g1_rs.size()/HeapWordSize); - - _g1_storage.initialize(g1_rs, 0); - _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); - _hrs.initialize((HeapWord*) _g1_reserved.start(), - (HeapWord*) _g1_reserved.end()); - assert(_hrs.max_length() == _expansion_regions, - err_msg("max length: %u expansion regions: %u", - _hrs.max_length(), _expansion_regions)); - - // Do later initialization work for concurrent refinement. - _cg1r->init(); + ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); + G1RegionToSpaceMapper* heap_storage = + G1RegionToSpaceMapper::create_mapper(g1_rs, + UseLargePages ? os::large_page_size() : os::vm_page_size(), + HeapRegion::GrainBytes, + 1, + mtJavaHeap); + heap_storage->set_mapping_changed_listener(&_listener); + + // Reserve space for the block offset table. We do not support automatic uncommit + // for the card table at this time. BOT only. + ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* bot_storage = + G1RegionToSpaceMapper::create_mapper(bot_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* cardtable_storage = + G1RegionToSpaceMapper::create_mapper(cardtable_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + // Reserve space for the card counts table. + ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* card_counts_storage = + G1RegionToSpaceMapper::create_mapper(card_counts_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + // Reserve space for prev and next bitmap. + size_t bitmap_size = CMBitMap::compute_size(g1_rs.size()); + + ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); + G1RegionToSpaceMapper* prev_bitmap_storage = + G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + CMBitMap::mark_distance(), + mtGC); + + ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); + G1RegionToSpaceMapper* next_bitmap_storage = + G1RegionToSpaceMapper::create_mapper(next_bitmap_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + CMBitMap::mark_distance(), + mtGC); + + _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); + g1_barrier_set()->initialize(cardtable_storage); + // Do later initialization work for concurrent refinement. 
+ _cg1r->init(card_counts_storage); // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. @@ -2071,29 +2061,16 @@ FreeRegionList::set_unrealistically_long_length(max_regions() + 1); - _bot_shared = new G1BlockOffsetSharedArray(_reserved, - heap_word_size(init_byte_size)); + _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage); _g1h = this; - _in_cset_fast_test_length = max_regions(); - _in_cset_fast_test_base = - NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); - - // We're biasing _in_cset_fast_test to avoid subtracting the - // beginning of the heap every time we want to index; basically - // it's the same with what we do with the card table. - _in_cset_fast_test = _in_cset_fast_test_base - - ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); - - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the first - // evacuation pause. - clear_cset_fast_test(); + _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); + _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); // Create the ConcurrentMark data structure and thread. // (Must do this late, so that "max_regions" is defined.) - _cm = new ConcurrentMark(this, heap_rs); + _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage); if (_cm == NULL || !_cm->completed_initialization()) { vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark"); return JNI_ENOMEM; @@ -2112,35 +2089,30 @@ // Perform any initialization actions delegated to the policy. g1_policy()->init(); - _refine_cte_cl = - new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), - g1_rem_set(), - concurrent_g1_refine()); - JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); - JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, SATB_Q_FL_lock, G1SATBProcessCompletedThreshold, Shared_SATB_Q_lock); - JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, + JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl, + DirtyCardQ_CBL_mon, DirtyCardQ_FL_lock, concurrent_g1_refine()->yellow_zone(), concurrent_g1_refine()->red_zone(), Shared_DirtyCardQ_lock); - if (G1DeferredRSUpdate) { - dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, - DirtyCardQ_FL_lock, - -1, // never trigger processing - -1, // no limit on length - Shared_DirtyCardQ_lock, - &JavaThread::dirty_card_queue_set()); - } + dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code + DirtyCardQ_CBL_mon, + DirtyCardQ_FL_lock, + -1, // never trigger processing + -1, // no limit on length + Shared_DirtyCardQ_lock, + &JavaThread::dirty_card_queue_set()); // Initialize the card queue set used to hold cards containing // references into the collection set. - _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, + _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code + DirtyCardQ_CBL_mon, DirtyCardQ_FL_lock, -1, // never trigger processing -1, // no limit on length @@ -2151,22 +2123,20 @@ // counts and that mechanism. SpecializationStats::clear(); - // Here we allocate the dummy full region that is required by the - // G1AllocRegion class. If we don't pass an address in the reserved - // space here, lots of asserts fire. 
- - HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */, - _g1_reserved.start()); + // Here we allocate the dummy HeapRegion that is required by the + // G1AllocRegion class. + HeapRegion* dummy_region = _hrm.get_dummy_region(); + // We'll re-use the same region whether the alloc region will // require BOT updates or not and, if it doesn't, then a non-young // region will complain that it cannot support allocations without - // BOT updates. So we'll tag the dummy region as young to avoid that. - dummy_region->set_young(); + // BOT updates. So we'll tag the dummy region as eden to avoid that. + dummy_region->set_eden(); // Make sure it's full. dummy_region->set_top(dummy_region->end()); G1AllocRegion::setup(this, dummy_region); - init_mutator_alloc_region(); + _allocator->init_mutator_alloc_region(); // Do create of the monitoring and management support so that // values in the heap have been properly initialized. @@ -2188,6 +2158,11 @@ } } +void G1CollectedHeap::clear_humongous_is_live_table() { + guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true"); + _humongous_is_live.clear(); +} + size_t G1CollectedHeap::conservative_max_heap_alignment() { return HeapRegion::max_region_size(); } @@ -2267,14 +2242,14 @@ } size_t G1CollectedHeap::capacity() const { - return _g1_committed.byte_size(); + return _hrm.length() * HeapRegion::GrainBytes; } void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { assert(!hr->continuesHumongous(), "pre-condition"); hr->reset_gc_time_stamp(); if (hr->startsHumongous()) { - uint first_index = hr->hrs_index() + 1; + uint first_index = hr->hrm_index() + 1; uint last_index = hr->last_hc_index(); for (uint i = first_index; i < last_index; i += 1) { HeapRegion* chr = region_at(i); @@ -2335,21 +2310,12 @@ // Computes the sum of the storage used by the various regions. - size_t G1CollectedHeap::used() const { - assert(Heap_lock->owner() != NULL, - "Should be owned on this thread's behalf."); - size_t result = _summary_bytes_used; - // Read only once in case it is set to NULL concurrently - HeapRegion* hr = _mutator_alloc_region.get(); - if (hr != NULL) - result += hr->used(); - return result; + return _allocator->used(); } size_t G1CollectedHeap::used_unlocked() const { - size_t result = _summary_bytes_used; - return result; + return _allocator->used_unlocked(); } class SumUsedClosure: public HeapRegionClosure { @@ -2375,30 +2341,12 @@ return blk.result(); } -size_t G1CollectedHeap::unsafe_max_alloc() { - if (free_regions() > 0) return HeapRegion::GrainBytes; - // otherwise, is there space in the current allocation region? - - // We need to store the current allocation region in a local variable - // here. The problem is that this method doesn't take any locks and - // there may be other threads which overwrite the current allocation - // region field. attempt_allocation(), for example, sets it to NULL - // and this can happen *after* the NULL check here but before the call - // to free(), resulting in a SIGSEGV. Note that this doesn't appear - // to be a problem in the optimized build, since the two loads of the - // current allocation region field are optimized away. 
- HeapRegion* hr = _mutator_alloc_region.get(); - if (hr == NULL) { - return 0; - } - return hr->free(); -} - bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { switch (cause) { case GCCause::_gc_locker: return GCLockerInvokesConcurrent; case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent; case GCCause::_g1_humongous_allocation: return true; + case GCCause::_update_allocation_context_stats_inc: return true; default: return false; } } @@ -2412,7 +2360,8 @@ for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { // Let's use the existing mechanism for the allocation - HeapWord* dummy_obj = humongous_obj_allocate(word_size); + HeapWord* dummy_obj = humongous_obj_allocate(word_size, + AllocationContext::system()); if (dummy_obj != NULL) { MemRegion mr(dummy_obj, word_size); CollectedHeap::fill_with_object(mr); @@ -2530,6 +2479,7 @@ unsigned int gc_count_before; unsigned int old_marking_count_before; + unsigned int full_gc_count_before; bool retry_gc; do { @@ -2540,6 +2490,7 @@ // Read the GC count while holding the Heap_lock gc_count_before = total_collections(); + full_gc_count_before = total_full_collections(); old_marking_count_before = _old_marking_cycles_started; } @@ -2552,6 +2503,7 @@ true, /* should_initiate_conc_mark */ g1_policy()->max_pause_time_ms(), cause); + op.set_allocation_context(AllocationContext::current()); VMThread::execute(&op); if (!op.pause_succeeded()) { @@ -2570,7 +2522,7 @@ } } } else { - if (cause == GCCause::_gc_locker + if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { // Schedule a standard evacuation pause. We're setting word_size @@ -2583,7 +2535,7 @@ VMThread::execute(&op); } else { // Schedule a Full GC. - VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause); + VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); VMThread::execute(&op); } } @@ -2591,8 +2543,8 @@ } bool G1CollectedHeap::is_in(const void* p) const { - if (_g1_committed.contains(p)) { - // Given that we know that p is in the committed space, + if (_hrm.reserved().contains(p)) { + // Given that we know that p is in the reserved space, // heap_region_containing_raw() should successfully // return the containing region. HeapRegion* hr = heap_region_containing_raw(p); @@ -2602,17 +2554,26 @@ } } +#ifdef ASSERT +bool G1CollectedHeap::is_in_exact(const void* p) const { + bool contains = reserved_region().contains(p); + bool available = _hrm.is_available(addr_to_region((HeapWord*)p)); + if (contains && available) { + return true; + } else { + return false; + } +} +#endif + // Iteration functions. -// Iterates an OopClosure over all ref-containing fields of objects -// within a HeapRegion. +// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion. 
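// [Editor's note, not part of the patch; illustrative use of the wrapper below:
//    MyOopClosure cl;   // some hypothetical ExtendedOopClosure
//    G1CollectedHeap::heap()->oop_iterate(&cl);
//  oop_iterate() wraps cl in an IterateOopClosureRegionClosure and visits every
//  region; "continues humongous" regions are skipped, so a humongous object is
//  applied exactly once, via its "starts humongous" region.]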
class IterateOopClosureRegionClosure: public HeapRegionClosure { - MemRegion _mr; ExtendedOopClosure* _cl; public: - IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl) - : _mr(mr), _cl(cl) {} + IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {} bool doHeapRegion(HeapRegion* r) { if (!r->continuesHumongous()) { r->oop_iterate(_cl); @@ -2622,12 +2583,7 @@ }; void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) { - IterateOopClosureRegionClosure blk(_g1_committed, cl); - heap_region_iterate(&blk); -} - -void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { - IterateOopClosureRegionClosure blk(mr, cl); + IterateOopClosureRegionClosure blk(cl); heap_region_iterate(&blk); } @@ -2668,89 +2624,15 @@ } void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { - _hrs.iterate(cl); + _hrm.iterate(cl); } void G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, uint worker_id, - uint no_of_par_workers, - jint claim_value) { - const uint regions = n_regions(); - const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ? - no_of_par_workers : - 1); - assert(UseDynamicNumberOfGCThreads || - no_of_par_workers == workers()->total_workers(), - "Non dynamic should use fixed number of workers"); - // try to spread out the starting points of the workers - const HeapRegion* start_hr = - start_region_for_worker(worker_id, no_of_par_workers); - const uint start_index = start_hr->hrs_index(); - - // each worker will actually look at all regions - for (uint count = 0; count < regions; ++count) { - const uint index = (start_index + count) % regions; - assert(0 <= index && index < regions, "sanity"); - HeapRegion* r = region_at(index); - // we'll ignore "continues humongous" regions (we'll process them - // when we come across their corresponding "start humongous" - // region) and regions already claimed - if (r->claim_value() == claim_value || r->continuesHumongous()) { - continue; - } - // OK, try to claim it - if (r->claimHeapRegion(claim_value)) { - // success! - assert(!r->continuesHumongous(), "sanity"); - if (r->startsHumongous()) { - // If the region is "starts humongous" we'll iterate over its - // "continues humongous" first; in fact we'll do them - // first. The order is important. In on case, calling the - // closure on the "starts humongous" region might de-allocate - // and clear all its "continues humongous" regions and, as a - // result, we might end up processing them twice. So, we'll do - // them first (notice: most closures will ignore them anyway) and - // then we'll do the "starts humongous" region. - for (uint ch_index = index + 1; ch_index < regions; ++ch_index) { - HeapRegion* chr = region_at(ch_index); - - // if the region has already been claimed or it's not - // "continues humongous" we're done - if (chr->claim_value() == claim_value || - !chr->continuesHumongous()) { - break; - } - - // No one should have claimed it directly. We can given - // that we claimed its "starts humongous" region. - assert(chr->claim_value() != claim_value, "sanity"); - assert(chr->humongous_start_region() == r, "sanity"); - - if (chr->claimHeapRegion(claim_value)) { - // we should always be able to claim it; no one else should - // be trying to claim this region - - bool res2 = cl->doHeapRegion(chr); - assert(!res2, "Should not abort"); - - // Right now, this holds (i.e., no closure that actually - // does something with "continues humongous" regions - // clears them). 
We might have to weaken it in the future, - // but let's leave these two asserts here for extra safety. - assert(chr->continuesHumongous(), "should still be the case"); - assert(chr->humongous_start_region() == r, "sanity"); - } else { - guarantee(false, "we should not reach here"); - } - } - } - - assert(!r->continuesHumongous(), "sanity"); - bool res = cl->doHeapRegion(r); - assert(!res, "Should not abort"); - } - } + uint num_workers, + jint claim_value) const { + _hrm.par_iterate(cl, worker_id, num_workers, claim_value); } class ResetClaimValuesClosure: public HeapRegionClosure { @@ -2928,17 +2810,6 @@ return result; } -HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i, - uint no_of_par_workers) { - uint worker_num = - G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U; - assert(UseDynamicNumberOfGCThreads || - no_of_par_workers == workers()->total_workers(), - "Non dynamic should use fixed number of workers"); - const uint start_index = n_regions() * worker_i / worker_num; - return region_at(start_index); -} - void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { HeapRegion* r = g1_policy()->collection_set(); while (r != NULL) { @@ -2980,27 +2851,25 @@ } } -CompactibleSpace* G1CollectedHeap::first_compactible_space() { - return n_regions() > 0 ? region_at(0) : NULL; -} - +HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const { + HeapRegion* result = _hrm.next_region_in_heap(from); + while (result != NULL && result->isHumongous()) { + result = _hrm.next_region_in_heap(result); + } + return result; +} Space* G1CollectedHeap::space_containing(const void* addr) const { - Space* res = heap_region_containing(addr); - return res; + return heap_region_containing(addr); } HeapWord* G1CollectedHeap::block_start(const void* addr) const { Space* sp = space_containing(addr); - if (sp != NULL) { - return sp->block_start(addr); - } - return NULL; + return sp->block_start(addr); } size_t G1CollectedHeap::block_size(const HeapWord* addr) const { Space* sp = space_containing(addr); - assert(sp != NULL, "block_size of address outside of heap"); return sp->block_size(addr); } @@ -3035,7 +2904,7 @@ // since we can't allow tlabs to grow big enough to accommodate // humongous objects. - HeapRegion* hr = _mutator_alloc_region.get(); + HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get(); size_t max_tlab = max_tlab_size() * wordSize; if (hr == NULL) { return max_tlab; @@ -3045,7 +2914,7 @@ } size_t G1CollectedHeap::max_capacity() const { - return _g1_reserved.byte_size(); + return _hrm.reserved().byte_size(); } jlong G1CollectedHeap::millis_since_last_gc() { @@ -3414,25 +3283,20 @@ if (!silent) { gclog_or_tty->print("Roots "); } VerifyRootsClosure rootsCl(vo); + VerifyKlassClosure klassCl(this, &rootsCl); + CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false); + + // We apply the relevant closures to all the oops in the + // system dictionary, class loader data graph, the string table + // and the nmethods in the code cache. G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); - VerifyKlassClosure klassCl(this, &rootsCl); - - // We apply the relevant closures to all the oops in the - // system dictionary, the string table and the code cache. 
- const int so = SO_AllClasses | SO_Strings | SO_CodeCache; - - // Need cleared claim bits for the strong roots processing - ClassLoaderDataGraph::clear_claimed_marks(); - - process_strong_roots(true, // activate StrongRootsScope - false, // we set "is scavenging" to false, - // so we don't reset the dirty cards. - ScanningOption(so), // roots scanning options - &rootsCl, - &blobsCl, - &klassCl - ); + + process_all_roots(true, // activate StrongRootsScope + SO_AllCodeCache, // roots scanning options + &rootsCl, + &cldCl, + &blobsCl); bool failures = rootsCl.failures() || codeRootsCl.failures(); @@ -3579,9 +3443,9 @@ st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", capacity()/K, used_unlocked()/K); st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", - _g1_storage.low_boundary(), - _g1_storage.high(), - _g1_storage.high_boundary()); + _hrm.reserved().start(), + _hrm.reserved().start() + _hrm.length() * HeapRegion::GrainWords, + _hrm.reserved().end()); st->cr(); st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); uint young_regions = _young_list->length(); @@ -3727,7 +3591,7 @@ } } -void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { +void G1CollectedHeap::gc_epilogue(bool full) { if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && @@ -3744,6 +3608,7 @@ // always_do_update_barrier = true; resize_all_tlabs(); + allocation_context_stats().update(full); // We have just completed a GC. Update the soft reference // policy with the new heap occupancy @@ -3761,6 +3626,8 @@ false, /* should_initiate_conc_mark */ g1_policy()->max_pause_time_ms(), gc_cause); + + op.set_allocation_context(AllocationContext::current()); VMThread::execute(&op); HeapWord* result = op.result(); @@ -3804,6 +3671,61 @@ return g1_rem_set()->cardsScanned(); } +bool G1CollectedHeap::humongous_region_is_always_live(uint index) { + HeapRegion* region = region_at(index); + assert(region->startsHumongous(), "Must start a humongous object"); + return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty(); +} + +class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { + private: + size_t _total_humongous; + size_t _candidate_humongous; + public: + RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) { + } + + virtual bool doHeapRegion(HeapRegion* r) { + if (!r->startsHumongous()) { + return false; + } + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + uint region_idx = r->hrm_index(); + bool is_candidate = !g1h->humongous_region_is_always_live(region_idx); + // is_candidate already filters out humongous regions with some remembered set. + // This will not lead to a humongous object being mistakenly kept alive, because + // during a young collection remembered sets are only added to. 
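// [Editor's note, not part of the patch: per humongous_region_is_always_live()
//  above, a starts-humongous region is a candidate only when its object is not
//  an objArray and its remembered set is empty. For example:
//    plain object, empty remembered set  -> candidate for eager reclaim
//    objArray                            -> never a candidate
//    remembered set non-empty            -> not a candidate in this pause]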
+ if (is_candidate) { + g1h->register_humongous_region_with_in_cset_fast_test(region_idx); + _candidate_humongous++; + } + _total_humongous++; + + return false; + } + + size_t total_humongous() const { return _total_humongous; } + size_t candidate_humongous() const { return _candidate_humongous; } +}; + +void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() { + if (!G1ReclaimDeadHumongousObjectsAtYoungGC) { + g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0); + return; + } + + RegisterHumongousWithInCSetFastTestClosure cl; + heap_region_iterate(&cl); + g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(), + cl.candidate_humongous()); + _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0; + + if (_has_humongous_reclaim_candidates) { + clear_humongous_is_live_table(); + } +} + void G1CollectedHeap::setup_surviving_young_words() { assert(_surviving_young_words == NULL, "pre-condition"); @@ -3893,8 +3815,7 @@ return; } - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id()); GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause()) .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)") @@ -4015,6 +3936,7 @@ increment_gc_time_stamp(); verify_before_gc(); + check_bitmaps("GC Start"); COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -4035,7 +3957,7 @@ // Forget the current alloc region (we might even choose it to be part // of the collection set!). - release_mutator_alloc_region(); + _allocator->release_mutator_alloc_region(); // We should call this after we retire the mutator alloc // region(s) so that all the ALLOC / RETIRE events are generated @@ -4090,6 +4012,8 @@ g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); + register_humongous_regions_with_in_cset_fast_test(); + _cm->note_start_of_gc(); // We should not verify the per-thread SATB buffers given that // we have not filtered them yet (we'll do so during the @@ -4103,14 +4027,6 @@ if (_hr_printer.is_active()) { HeapRegion* hr = g1_policy()->collection_set(); while (hr != NULL) { - G1HRPrinter::RegionType type; - if (!hr->is_young()) { - type = G1HRPrinter::Old; - } else if (hr->is_survivor()) { - type = G1HRPrinter::Survivor; - } else { - type = G1HRPrinter::Eden; - } _hr_printer.cset(hr); hr = hr->next_in_collection_set(); } @@ -4124,7 +4040,7 @@ setup_surviving_young_words(); // Initialize the GC alloc regions. - init_gc_alloc_regions(evacuation_info); + _allocator->init_gc_alloc_regions(evacuation_info); // Actually do the work... evacuate_collection_set(evacuation_info); @@ -4140,6 +4056,9 @@ true /* verify_fingers */); free_collection_set(g1_policy()->collection_set(), evacuation_info); + + eagerly_reclaim_humongous_regions(); + g1_policy()->clear_collection_set(); cleanup_surviving_young_words(); @@ -4147,9 +4066,6 @@ // Start a new incremental collection set for the next pause. g1_policy()->start_incremental_cset_building(); - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the next - // evacuation pause. 
clear_cset_fast_test(); _young_list->reset_sampled_info(); @@ -4173,7 +4089,7 @@ _young_list->reset_auxilary_lists(); if (evacuation_failed()) { - _summary_bytes_used = recalculate_used(); + _allocator->set_used(recalculate_used()); uint n_queues = MAX2((int)ParallelGCThreads, 1); for (uint i = 0; i < n_queues; i++) { if (_evacuation_failed_info_array[i].has_failed()) { @@ -4183,7 +4099,7 @@ } } else { // The "used" of the collection set has already been subtracted // when they were freed. Add in the bytes evacuated. - _summary_bytes_used += g1_policy()->bytes_copied_during_gc(); + _allocator->increase_used(g1_policy()->bytes_copied_during_gc()); } if (g1_policy()->during_initial_mark_pause()) { @@ -4205,7 +4121,7 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - init_mutator_alloc_region(); + _allocator->init_mutator_alloc_region(); { size_t expand_bytes = g1_policy()->expansion_amount(); @@ -4214,10 +4130,7 @@ // No need for an ergo verbose message here, // expansion_amount() does this when it returns a value > 0. if (!expand(expand_bytes)) { - // We failed to expand the heap so let's verify that - // committed/uncommitted amount match the backing store - assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); - assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); + // We failed to expand the heap. Cannot do anything about it. } } } @@ -4263,6 +4176,7 @@ increment_gc_time_stamp(); verify_after_gc(); + check_bitmaps("GC End"); assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); ref_processor_stw()->verify_no_references_recorded(); @@ -4276,10 +4190,6 @@ // RETIRE events are generated before the end GC event. _hr_printer.end_gc(false /* full */, (size_t) total_collections()); - if (mark_in_progress()) { - concurrent_mark()->update_g1_committed(); - } - #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif @@ -4295,7 +4205,7 @@ // output from the concurrent mark thread interfering with this // logging output either. - _hrs.verify_optional(); + _hrm.verify_optional(); verify_region_sets_optional(); TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); @@ -4326,7 +4236,7 @@ // this point does not assume that we are the only GC thread // running. Note: of course, the actual marking work will // not start until the safepoint itself is released in - // ConcurrentGCThread::safepoint_desynchronize(). + // SuspendibleThreadSet::desynchronize(). 
doConcurrentMark(); } @@ -4356,75 +4266,6 @@ return MIN2(_humongous_object_threshold_in_words, gclab_word_size); } -void G1CollectedHeap::init_mutator_alloc_region() { - assert(_mutator_alloc_region.get() == NULL, "pre-condition"); - _mutator_alloc_region.init(); -} - -void G1CollectedHeap::release_mutator_alloc_region() { - _mutator_alloc_region.release(); - assert(_mutator_alloc_region.get() == NULL, "post-condition"); -} - -void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { - assert_at_safepoint(true /* should_be_vm_thread */); - - _survivor_gc_alloc_region.init(); - _old_gc_alloc_region.init(); - HeapRegion* retained_region = _retained_old_gc_alloc_region; - _retained_old_gc_alloc_region = NULL; - - // We will discard the current GC alloc region if: - // a) it's in the collection set (it can happen!), - // b) it's already full (no point in using it), - // c) it's empty (this means that it was emptied during - // a cleanup and it should be on the free list now), or - // d) it's humongous (this means that it was emptied - // during a cleanup and was added to the free list, but - // has been subsequently used to allocate a humongous - // object that may be less than the region size). - if (retained_region != NULL && - !retained_region->in_collection_set() && - !(retained_region->top() == retained_region->end()) && - !retained_region->is_empty() && - !retained_region->isHumongous()) { - retained_region->set_saved_mark(); - // The retained region was added to the old region set when it was - // retired. We have to remove it now, since we don't allow regions - // we allocate to in the region sets. We'll re-add it later, when - // it's retired again. - _old_set.remove(retained_region); - bool during_im = g1_policy()->during_initial_mark_pause(); - retained_region->note_start_of_copying(during_im); - _old_gc_alloc_region.set(retained_region); - _hr_printer.reuse(retained_region); - evacuation_info.set_alloc_regions_used_before(retained_region->used()); - } -} - -void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { - evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + - _old_gc_alloc_region.count()); - _survivor_gc_alloc_region.release(); - // If we have an old GC alloc region to release, we'll save it in - // _retained_old_gc_alloc_region. If we don't - // _retained_old_gc_alloc_region will become NULL. This is what we - // want either way so no reason to check explicitly for either - // condition. 
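The discard conditions a)-d) listed above amount to a four-part predicate over the retained region; a standalone sketch (RegionState is an invented stand-in for HeapRegion, with is_full modeling top() == end()):

struct RegionState {
  bool in_collection_set;
  bool is_full;        // top() == end()
  bool is_empty;
  bool is_humongous;
};

// Mirrors conditions a)-d) above: reuse the retained region only if none apply.
bool can_reuse_retained_region(const RegionState& r) {
  return !r.in_collection_set && !r.is_full && !r.is_empty && !r.is_humongous;
}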
- _retained_old_gc_alloc_region = _old_gc_alloc_region.release(); - - if (ResizePLAB) { - _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers); - _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers); - } -} - -void G1CollectedHeap::abandon_gc_alloc_regions() { - assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition"); - assert(_old_gc_alloc_region.get() == NULL, "pre-condition"); - _retained_old_gc_alloc_region = NULL; -} - void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { _drain_in_progress = false; set_evac_failure_closure(cl); @@ -4565,25 +4406,26 @@ } HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, - size_t word_size) { + size_t word_size, + AllocationContext_t context) { if (purpose == GCAllocForSurvived) { - HeapWord* result = survivor_attempt_allocation(word_size); + HeapWord* result = survivor_attempt_allocation(word_size, context); if (result != NULL) { return result; } else { // Let's try to allocate in the old gen in case we can fit the // object there. - return old_attempt_allocation(word_size); + return old_attempt_allocation(word_size, context); } } else { assert(purpose == GCAllocForTenured, "sanity"); - HeapWord* result = old_attempt_allocation(word_size); + HeapWord* result = old_attempt_allocation(word_size, context); if (result != NULL) { return result; } else { // Let's try to allocate in the survivors in case we can fit the // object there. - return survivor_attempt_allocation(word_size); + return survivor_attempt_allocation(word_size, context); } } @@ -4592,154 +4434,20 @@ return NULL; } -G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) : - ParGCAllocBuffer(gclab_word_size), _retired(false) { } - -G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp) - : _g1h(g1h), - _refs(g1h->task_queue(queue_num)), - _dcq(&g1h->dirty_card_queue_set()), - _ct_bs(g1h->g1_barrier_set()), - _g1_rem(g1h->g1_rem_set()), - _hash_seed(17), _queue_num(queue_num), - _term_attempts(0), - _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), - _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), - _age_table(false), _scanner(g1h, this, rp), - _strong_roots_time(0), _term_time(0), - _alloc_buffer_waste(0), _undo_waste(0) { - // we allocate G1YoungSurvRateNumRegions plus one entries, since - // we "sacrifice" entry 0 to keep track of surviving bytes for - // non-young regions (where the age is -1) - // We also add a few elements at the beginning and at the end in - // an attempt to eliminate cache contention - uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); - uint array_length = PADDING_ELEM_NUM + - real_length + - PADDING_ELEM_NUM; - _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); - if (_surviving_young_words_base == NULL) - vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, - "Not enough space for young surv histo."); - _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; - memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); - - _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; - _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; - - _start = os::elapsedTime(); -} - -void -G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) -{ - st->print_raw_cr("GC Termination Stats"); - st->print_raw_cr(" elapsed --strong roots-- -------termination-------" - " ------waste (KiB)------"); - 
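par_allocate_during_gc() above is a plain two-level fallback: try the space the policy asked for, then the other one, and let a NULL result trigger evacuation-failure handling. A self-contained sketch with invented bump-pointer spaces (not HotSpot allocation code):

#include <cstddef>

// Two bump-pointer spaces; try the preferred one, then fall back to the other,
// as par_allocate_during_gc does for survivor vs. old.
struct Space {
  char* cur;
  char* end;
  void* allocate(size_t bytes) {
    if (static_cast<size_t>(end - cur) < bytes) return nullptr;
    void* result = cur;
    cur += bytes;
    return result;
  }
};

void* allocate_with_fallback(Space& preferred, Space& other, size_t bytes) {
  if (void* p = preferred.allocate(bytes)) return p;
  return other.allocate(bytes);  // nullptr here means evacuation failure handling
}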
st->print_raw_cr("thr ms ms % ms % attempts" - " total alloc undo"); - st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" - " ------- ------- -------"); -} - -void -G1ParScanThreadState::print_termination_stats(int i, - outputStream* const st) const -{ - const double elapsed_ms = elapsed_time() * 1000.0; - const double s_roots_ms = strong_roots_time() * 1000.0; - const double term_ms = term_time() * 1000.0; - st->print_cr("%3d %9.2f %9.2f %6.2f " - "%9.2f %6.2f " SIZE_FORMAT_W(8) " " - SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), - i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, - term_ms, term_ms * 100 / elapsed_ms, term_attempts(), - (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, - alloc_buffer_waste() * HeapWordSize / K, - undo_waste() * HeapWordSize / K); -} - -#ifdef ASSERT -bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { - assert(ref != NULL, "invariant"); - assert(UseCompressedOops, "sanity"); - assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); - oop p = oopDesc::load_decode_heap_oop(ref); - assert(_g1h->is_in_g1_reserved(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); - return true; -} - -bool G1ParScanThreadState::verify_ref(oop* ref) const { - assert(ref != NULL, "invariant"); - if (has_partial_array_mask(ref)) { - // Must be in the collection set--it's already been copied. - oop p = clear_partial_array_mask(ref); - assert(_g1h->obj_in_cs(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); - } else { - oop p = oopDesc::load_decode_heap_oop(ref); - assert(_g1h->is_in_g1_reserved(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); - } - return true; -} - -bool G1ParScanThreadState::verify_task(StarTask ref) const { - if (ref.is_narrow()) { - return verify_ref((narrowOop*) ref); - } else { - return verify_ref((oop*) ref); - } -} -#endif // ASSERT - -void G1ParScanThreadState::trim_queue() { - assert(_evac_failure_cl != NULL, "not set"); - - StarTask ref; - do { - // Drain the overflow stack first, so other threads can steal. - while (refs()->pop_overflow(ref)) { - deal_with_reference(ref); - } - - while (refs()->pop_local(ref)) { - deal_with_reference(ref); - } - } while (!refs()->is_empty()); -} - -G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, - G1ParScanThreadState* par_scan_state) : - _g1(g1), _par_scan_state(par_scan_state), - _worker_id(par_scan_state->queue_num()) { } - void G1ParCopyHelper::mark_object(oop obj) { -#ifdef ASSERT - HeapRegion* hr = _g1->heap_region_containing(obj); - assert(hr != NULL, "sanity"); - assert(!hr->in_collection_set(), "should not mark objects in the CSet"); -#endif // ASSERT + assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet"); // We know that the object is not moving so it's safe to read its size. 
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id); } void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) { -#ifdef ASSERT assert(from_obj->is_forwarded(), "from obj should be forwarded"); assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee"); assert(from_obj != to_obj, "should not be self-forwarded"); - HeapRegion* from_hr = _g1->heap_region_containing(from_obj); - assert(from_hr != NULL, "sanity"); - assert(from_hr->in_collection_set(), "from obj should be in the CSet"); - - HeapRegion* to_hr = _g1->heap_region_containing(to_obj); - assert(to_hr != NULL, "sanity"); - assert(!to_hr->in_collection_set(), "should not mark objects in the CSet"); -#endif // ASSERT + assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet"); + assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet"); // The object might be in the process of being copied by another // worker so we cannot trust that its to-space image is @@ -4748,107 +4456,6 @@ _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id); } -oop G1ParScanThreadState::copy_to_survivor_space(oop const old) { - size_t word_sz = old->size(); - HeapRegion* from_region = _g1h->heap_region_containing_raw(old); - // +1 to make the -1 indexes valid... - int young_index = from_region->young_index_in_cset()+1; - assert( (from_region->is_young() && young_index > 0) || - (!from_region->is_young() && young_index == 0), "invariant" ); - G1CollectorPolicy* g1p = _g1h->g1_policy(); - markOop m = old->mark(); - int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() - : m->age(); - GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, - word_sz); - HeapWord* obj_ptr = allocate(alloc_purpose, word_sz); -#ifndef PRODUCT - // Should this evacuation fail? - if (_g1h->evacuation_should_fail()) { - if (obj_ptr != NULL) { - undo_allocation(alloc_purpose, obj_ptr, word_sz); - obj_ptr = NULL; - } - } -#endif // !PRODUCT - - if (obj_ptr == NULL) { - // This will either forward-to-self, or detect that someone else has - // installed a forwarding pointer. - return _g1h->handle_evacuation_failure_par(this, old); - } - - oop obj = oop(obj_ptr); - - // We're going to allocate linearly, so might as well prefetch ahead. - Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); - - oop forward_ptr = old->forward_to_atomic(obj); - if (forward_ptr == NULL) { - Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); - - // alloc_purpose is just a hint to allocate() above, recheck the type of region - // we actually allocated from and update alloc_purpose accordingly - HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr); - alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured; - - if (g1p->track_object_age(alloc_purpose)) { - // We could simply do obj->incr_age(). However, this causes a - // performance issue. obj->incr_age() will first check whether - // the object has a displaced mark by checking its mark word; - // getting the mark word from the new location of the object - // stalls. So, given that we already have the mark word and we - // are about to install it anyway, it's better to increase the - // age on the mark word, when the object does not have a - // displaced mark word. We're not expecting many objects to have - // a displaced marked word, so that case is not optimized - // further (it could be...) and we simply call obj->incr_age(). 
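The rationale in the removed comment above is that the mark word was already loaded from the old copy of the object, so the age can be bumped in that local copy and stored once, instead of re-reading the header from the freshly copied object (which stalls). A sketch of the bit manipulation, assuming HotSpot's layout of 4 age bits at shift 3 (illustrative only):

#include <cstdint>

constexpr uint64_t AGE_SHIFT = 3;   // above the 2 lock bits and 1 biased bit
constexpr uint64_t AGE_MASK  = 0xF; // 4 age bits, saturating at 15

inline uint64_t mark_age(uint64_t mark) { return (mark >> AGE_SHIFT) & AGE_MASK; }

// Increment the age in a local copy of the mark word; the caller stores the
// result into the new object's header exactly once.
inline uint64_t mark_incr_age(uint64_t mark) {
  uint64_t age = mark_age(mark);
  if (age < AGE_MASK) age++;
  return (mark & ~(AGE_MASK << AGE_SHIFT)) | (age << AGE_SHIFT);
}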
-
-        if (m->has_displaced_mark_helper()) {
-          // in this case, we have to install the mark word first,
-          // otherwise obj looks to be forwarded (the old mark word,
-          // which contains the forward pointer, was copied)
-          obj->set_mark(m);
-          obj->incr_age();
-        } else {
-          m = m->incr_age();
-          obj->set_mark(m);
-        }
-        age_table()->add(obj, word_sz);
-      } else {
-        obj->set_mark(m);
-      }
-
-      if (G1StringDedup::is_enabled()) {
-        G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                               to_region->is_young(),
-                                               queue_num(),
-                                               obj);
-      }
-
-      size_t* surv_young_words = surviving_young_words();
-      surv_young_words[young_index] += word_sz;
-
-      if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
-        // We keep track of the next start index in the length field of
-        // the to-space object. The actual length can be found in the
-        // length field of the from-space object.
-        arrayOop(obj)->set_length(0);
-        oop* old_p = set_partial_array_mask(old);
-        push_on_queue(old_p);
-      } else {
-        // No point in using the slower heap_region_containing() method,
-        // given that we know obj is in the heap.
-        _scanner.set_region(_g1h->heap_region_containing_raw(obj));
-        obj->oop_iterate_backwards(&_scanner);
-      }
-    } else {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
-      obj = forward_ptr;
-    }
-    return obj;
-}
-
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
@@ -4856,7 +4463,7 @@
   }
 }

-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -4869,7 +4476,9 @@

   assert(_worker_id == _par_scan_state->queue_num(), "sanity");

-  if (_g1->in_cset_fast_test(obj)) {
+  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
+
+  if (state == G1CollectedHeap::InCSet) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
@@ -4878,7 +4487,7 @@
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object && forwardee != obj) {
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
       mark_forwarded_object(obj, forwardee);
@@ -4888,10 +4497,12 @@
       do_klass_barrier(p, forwardee);
     }
   } else {
+    if (state == G1CollectedHeap::IsHumongous) {
+      _g1->set_humongous_is_live(obj);
+    }
     // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause (i.e. do_mark_object will
-    // be true) then attempt to mark the object.
-    if (do_mark_object) {
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
       mark_object(obj);
     }
   }
@@ -4901,8 +4512,8 @@
   }
 }

-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
@@ -4938,27 +4549,11 @@
 }

 void G1ParEvacuateFollowersClosure::do_void() {
-  StarTask stolen_task;
   G1ParScanThreadState* const pss = par_scan_state();
   pss->trim_queue();
-
   do {
-    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-      assert(pss->verify_task(stolen_task), "sanity");
-      if (stolen_task.is_narrow()) {
-        pss->deal_with_reference((narrowOop*) stolen_task);
-      } else {
-        pss->deal_with_reference((oop*) stolen_task);
-      }
-
-      // We've just processed a reference and we might have made
-      // available new entries on the queues. So we have to make sure
-      // we drain the queues as necessary.
-      pss->trim_queue();
-    }
+    pss->steal_and_trim_queue(queues());
   } while (!offer_termination());
-
-  pss->retire_alloc_buffers();
 }

 class G1KlassScanClosure : public KlassClosure {
@@ -4987,6 +4582,56 @@
   }
 };

+class G1CodeBlobClosure : public CodeBlobClosure {
+  class HeapRegionGatheringOopClosure : public OopClosure {
+    G1CollectedHeap* _g1h;
+    OopClosure* _work;
+    nmethod* _nm;
+
+    template <typename T>
+    void do_oop_work(T* p) {
+      _work->do_oop(p);
+      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+      if (!oopDesc::is_null(oop_or_narrowoop)) {
+        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
+        hr->add_strong_code_root(_nm);
+      }
+    }
+
+  public:
+    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+    void do_oop(oop* o) {
+      do_oop_work(o);
+    }
+
+    void do_oop(narrowOop* o) {
+      do_oop_work(o);
+    }
+
+    void set_nm(nmethod* nm) {
+      _nm = nm;
+    }
+  };
+
+  HeapRegionGatheringOopClosure _oc;
+public:
+  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      if (!nm->test_set_oops_do_mark()) {
+        _oc.set_nm(nm);
+        nm->oops_do(&_oc);
+        nm->fix_oop_relocations();
+      }
+    }
+  }
+};
+
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
@@ -4997,14 +4642,8 @@
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }

-  size_t getNCards() {
-    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
-      / G1BlockOffsetSharedArray::N_bytes;
-  }
-
 public:
-  G1ParTask(G1CollectedHeap* g1h,
-            RefToScanQueueSet *task_queues)
+  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
       _g1h(g1h),
       _queues(task_queues),
@@ -5032,6 +4671,35 @@
     _n_workers = active_workers;
   }

+  // Helps out with CLD processing.
+  //
+  // During InitialMark we need to:
+  // 1) Scavenge all CLDs for the young GC.
+  // 2) Mark all objects directly reachable from strong CLDs.
+  template <G1Mark do_mark_object>
+  class G1CLDClosure : public CLDClosure {
+    G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
+    G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
+    G1KlassScanClosure _klass_in_cld_closure;
+    bool _claim;
+
+   public:
+    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
+                 bool only_young, bool claim)
+        : _oop_closure(oop_closure),
+          _oop_in_klass_closure(oop_closure->g1(),
+                                oop_closure->pss(),
+                                oop_closure->rp()),
+          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
+          _claim(claim) {
+
+    }
+
+    void do_cld(ClassLoaderData* cld) {
+      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
+    }
+  };
+
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
@@ -5049,40 +4717,67 @@

       pss.set_evac_failure_closure(&evac_failure_cl);

-      G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
-      G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
-
-      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
-      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
-
-      bool only_young = _g1h->g1_policy()->gcs_are_young();
-      G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
-      G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
-
-      OopClosure* scan_root_cl = &only_scan_root_cl;
-      G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
+      bool only_young = _g1h->g1_policy()->gcs_are_young();
+
+      // Non-IM young GC.
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
+                                                only_young, // Only process dirty klasses.
+                                                false);     // No need to claim CLDs.
+      // IM young GC.
+      //    Strong roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
+                                                    false, // Process all klasses.
+                                                    true); // Need to claim CLDs.
+      //    Weak roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                 false, // Process all klasses.
+                                                                 true); // Need to claim CLDs.
+
+      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
+      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
+      // IM Weak code roots are handled later.
+
+      OopClosure* strong_root_cl;
+      OopClosure* weak_root_cl;
+      CLDClosure* strong_cld_cl;
+      CLDClosure* weak_cld_cl;
+      CodeBlobClosure* strong_code_cl;

       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
-        scan_root_cl = &scan_mark_root_cl;
-        scan_klasses_cl = &scan_mark_klasses_cl_s;
+        strong_root_cl = &scan_mark_root_cl;
+        strong_cld_cl  = &scan_mark_cld_cl;
+        strong_code_cl = &scan_mark_code_cl;
+        if (ClassUnloadingWithConcurrentMark) {
+          weak_root_cl = &scan_mark_weak_root_cl;
+          weak_cld_cl  = &scan_mark_weak_cld_cl;
+        } else {
+          weak_root_cl = &scan_mark_root_cl;
+          weak_cld_cl  = &scan_mark_cld_cl;
+        }
+      } else {
+        strong_root_cl = &scan_only_root_cl;
+        weak_root_cl   = &scan_only_root_cl;
+        strong_cld_cl  = &scan_only_cld_cl;
+        weak_cld_cl    = &scan_only_cld_cl;
+        strong_code_cl = &scan_only_code_cl;
       }

-      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
-
-      // Don't scan the scavengable methods in the code cache as part
-      // of strong root scanning. The code roots that point into a
-      // region in the collection set are scanned when we scan the
-      // region's RSet.
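The wiring in work() above picks one of three closure sets per pause; only the choice logic matters, so plain function pointers stand in for the closure objects in this sketch (select_closures and OopFn are invented names, not HotSpot API):

typedef void (*OopFn)(void*);

struct RootClosures {
  OopFn strong;
  OopFn weak;
};

RootClosures select_closures(bool initial_mark, bool class_unloading,
                             OopFn scan_only, OopFn scan_mark, OopFn scan_mark_weak) {
  if (!initial_mark) {
    return { scan_only, scan_only };      // non-IM: strong and weak are the same
  }
  if (class_unloading) {
    return { scan_mark, scan_mark_weak }; // weak roots must not keep classes alive
  }
  return { scan_mark, scan_mark };        // no unloading: mark through everything
}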
- int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings; + + G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); pss.start_strong_roots(); - _g1h->g1_process_strong_roots(/* is scavenging */ true, - SharedHeap::ScanningOption(so), - scan_root_cl, - &push_heap_rs_cl, - scan_klasses_cl, - worker_id); + _g1h->g1_process_roots(strong_root_cl, + weak_root_cl, + &push_heap_rs_cl, + strong_cld_cl, + weak_cld_cl, + strong_code_cl, + worker_id); + pss.end_strong_roots(); { @@ -5102,7 +4797,7 @@ pss.print_termination_stats(worker_id); } - assert(pss.refs()->is_empty(), "should be empty"); + assert(pss.queue_is_empty(), "should be empty"); // Close the inner scope so that the ResourceMark and HandleMark // destructors are executed here and are included as part of the @@ -5120,30 +4815,32 @@ void G1CollectedHeap:: -g1_process_strong_roots(bool is_scavenging, - ScanningOption so, - OopClosure* scan_non_heap_roots, - OopsInHeapRegionClosure* scan_rs, - G1KlassScanClosure* scan_klasses, - uint worker_i) { - - // First scan the strong roots +g1_process_roots(OopClosure* scan_non_heap_roots, + OopClosure* scan_non_heap_weak_roots, + OopsInHeapRegionClosure* scan_rs, + CLDClosure* scan_strong_clds, + CLDClosure* scan_weak_clds, + CodeBlobClosure* scan_strong_code, + uint worker_i) { + + // First scan the shared roots. double ext_roots_start = os::elapsedTime(); double closure_app_time_sec = 0.0; + bool during_im = _g1h->g1_policy()->during_initial_mark_pause(); + bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark; + BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); - - assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow"); - // Walk the code cache/strong code roots w/o buffering, because StarTask - // cannot handle unaligned oop locations. - CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */); - - process_strong_roots(false, // no scoping; this is parallel code - is_scavenging, so, - &buf_scan_non_heap_roots, - &eager_scan_code_roots, - scan_klasses - ); + BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots); + + process_roots(false, // no scoping; this is parallel code + SharedHeap::SO_None, + &buf_scan_non_heap_roots, + &buf_scan_non_heap_weak_roots, + scan_strong_clds, + // Unloading Initial Marks handle the weak CLDs separately. + (trace_metadata ? NULL : scan_weak_clds), + scan_strong_code); // Now the CM ref_processor roots. if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { @@ -5154,10 +4851,21 @@ ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots); } + if (trace_metadata) { + // Barrier to make sure all workers passed + // the strong CLD and strong nmethods phases. + active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads()); + + // Now take the complement of the strong CLDs. + ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds); + } + // Finish up any enqueued closure apps (attributed as object copy time). 
buf_scan_non_heap_roots.done(); - - double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds(); + buf_scan_non_heap_weak_roots.done(); + + double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds() + + buf_scan_non_heap_weak_roots.closure_app_seconds(); g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); @@ -5181,32 +4889,14 @@ } g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); - // If this is an initial mark pause, and we're not scanning - // the entire code cache, we need to mark the oops in the - // strong code root lists for the regions that are not in - // the collection set. - // Note all threads participate in this set of root tasks. - double mark_strong_code_roots_ms = 0.0; - if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) { - double mark_strong_roots_start = os::elapsedTime(); - mark_strong_code_roots(worker_i); - mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0; - } - g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms); - // Now scan the complement of the collection set. - if (scan_rs != NULL) { - g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); - } + G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots); + + g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i); + _process_strong_tasks->all_tasks_completed(); } -void -G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { - CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); - SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); -} - class G1StringSymbolTableUnlinkTask : public AbstractGangTask { private: BoolObjectClosure* _is_alive; @@ -5224,7 +4914,8 @@ bool _do_in_parallel; public: G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : - AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive), + AbstractGangTask("String/Symbol Unlinking"), + _is_alive(is_alive), _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()), _process_strings(process_strings), _strings_processed(0), _strings_removed(0), _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { @@ -5246,6 +4937,14 @@ guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT, SymbolTable::parallel_claimed_index(), _initial_symbol_table_size)); + + if (G1TraceStringSymbolTableScrubbing) { + gclog_or_tty->print_cr("Cleaned string and symbol table, " + "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, " + "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed", + strings_processed(), strings_removed(), + symbols_processed(), symbols_removed()); + } } void work(uint worker_id) { @@ -5281,12 +4980,300 @@ size_t symbols_removed() const { return (size_t)_symbols_removed; } }; -void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, - bool process_strings, bool process_symbols) { +class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC { +private: + static Monitor* _lock; + + BoolObjectClosure* const _is_alive; + const bool _unloading_occurred; + const uint _num_workers; + + // Variables used to claim nmethods. 
+  nmethod* _first_nmethod;
+  volatile nmethod* _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  volatile nmethod* _postponed_list;
+  volatile uint _num_entered_barrier;
+
+ public:
+  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
+      _is_alive(is_alive),
+      _unloading_occurred(unloading_occurred),
+      _num_workers(num_workers),
+      _first_nmethod(NULL),
+      _claimed_nmethod(NULL),
+      _postponed_list(NULL),
+      _num_entered_barrier(0)
+  {
+    nmethod::increase_unloading_clock();
+    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
+  }
+
+  ~G1CodeCacheUnloadingTask() {
+    CodeCache::verify_clean_inline_caches();
+
+    CodeCache::set_needs_cache_clean(false);
+    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
+
+    CodeCache::verify_icholder_relocations();
+  }
+
+ private:
+  void add_to_postponed_list(nmethod* nm) {
+    nmethod* old;
+    do {
+      old = (nmethod*)_postponed_list;
+      nm->set_unloading_next(old);
+    } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+  }
+
+  void clean_nmethod(nmethod* nm) {
+    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
+
+    if (postponed) {
+      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
+      add_to_postponed_list(nm);
+    }
+
+    // Mark that this nmethod has been cleaned/unloaded.
+    // After this call, it will be safe to ask if this nmethod was unloaded or not.
+    nm->set_unloading_clock(nmethod::global_unloading_clock());
+  }
+
+  void clean_nmethod_postponed(nmethod* nm) {
+    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
+  }
+
+  static const int MaxClaimNmethods = 16;
+
+  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
+    nmethod* first;
+    nmethod* last;
+
+    do {
+      *num_claimed_nmethods = 0;
+
+      first = last = (nmethod*)_claimed_nmethod;
+
+      if (first != NULL) {
+        for (int i = 0; i < MaxClaimNmethods; i++) {
+          last = CodeCache::alive_nmethod(CodeCache::next(last));
+
+          if (last == NULL) {
+            break;
+          }
+
+          claimed_nmethods[i] = last;
+          (*num_claimed_nmethods)++;
+        }
+      }
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+  }
+
+  nmethod* claim_postponed_nmethod() {
+    nmethod* claim;
+    nmethod* next;
+
+    do {
+      claim = (nmethod*)_postponed_list;
+      if (claim == NULL) {
+        return NULL;
+      }
+
+      next = claim->unloading_next();
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+
+    return claim;
+  }
+
+ public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    _num_entered_barrier++;
+    if (_num_entered_barrier == _num_workers) {
+      ml.notify_all();
+    }
+  }
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id) {
+    if (_num_entered_barrier < _num_workers) {
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      while (_num_entered_barrier < _num_workers) {
+        ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
+      }
+    }
+  }
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
+  void work_first_pass(uint worker_id) {
+    // The first nmethod is claimed by the first worker.
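claim_nmethods() above hands out work without a lock: a worker snapshots the shared cursor, walks up to MaxClaimNmethods successors, and publishes the new cursor with a single CAS, retrying if another worker won the race. The same protocol over an invented linked list, using std::atomic (a sketch, not HotSpot's Atomic API):

#include <atomic>

struct Node { Node* next; };

constexpr int MaxClaim = 16;

// Returns the number of claimed nodes; 0 means the list is exhausted.
int claim_batch(std::atomic<Node*>& cursor, Node* out[MaxClaim]) {
  Node* first;
  Node* last;
  int n;
  do {
    n = 0;
    first = last = cursor.load();
    if (first != nullptr) {
      // Claim the successors of the current cursor position.
      for (int i = 0; i < MaxClaim && last->next != nullptr; i++) {
        last = last->next;
        out[n++] = last;
      }
    }
    // Publish the new cursor; retry from scratch if someone moved it first.
  } while (!cursor.compare_exchange_weak(first, last));
  return n;
}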
+    if (worker_id == 0 && _first_nmethod != NULL) {
+      clean_nmethod(_first_nmethod);
+      _first_nmethod = NULL;
+    }
+
+    int num_claimed_nmethods;
+    nmethod* claimed_nmethods[MaxClaimNmethods];
+
+    while (true) {
+      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
+
+      if (num_claimed_nmethods == 0) {
+        break;
+      }
+
+      for (int i = 0; i < num_claimed_nmethods; i++) {
+        clean_nmethod(claimed_nmethods[i]);
+      }
+    }
+
+    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
+    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
+    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
+  }
+
+  void work_second_pass(uint worker_id) {
+    nmethod* nm;
+    // Take care of postponed nmethods.
+    while ((nm = claim_postponed_nmethod()) != NULL) {
+      clean_nmethod_postponed(nm);
+    }
+  }
+};
+
+Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
+
+class G1KlassCleaningTask : public StackObj {
+  BoolObjectClosure* _is_alive;
+  volatile jint _clean_klass_tree_claimed;
+  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
+
+ public:
+  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
+      _is_alive(is_alive),
+      _clean_klass_tree_claimed(0),
+      _klass_iterator() {
+  }
+
+ private:
+  bool claim_clean_klass_tree_task() {
+    if (_clean_klass_tree_claimed) {
+      return false;
+    }
+
+    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
+  }
+
+  InstanceKlass* claim_next_klass() {
+    Klass* klass;
+    do {
+      klass = _klass_iterator.next_klass();
+    } while (klass != NULL && !klass->oop_is_instance());
+
+    return (InstanceKlass*)klass;
+  }
+
+public:
+
+  void clean_klass(InstanceKlass* ik) {
+    ik->clean_implementors_list(_is_alive);
+    ik->clean_method_data(_is_alive);
+
+    // G1 specific cleanup work that has
+    // been moved here to be done in parallel.
+    ik->clean_dependent_nmethods();
+    if (JvmtiExport::has_redefined_a_class()) {
+      InstanceKlass::purge_previous_versions(ik);
+    }
+  }
+
+  void work() {
+    ResourceMark rm;
+
+    // One worker will clean the subklass/sibling klass tree.
+    if (claim_clean_klass_tree_task()) {
+      Klass::clean_subklass_tree(_is_alive);
+    }
+
+    // All workers will help clean the classes.
+    InstanceKlass* klass;
+    while ((klass = claim_next_klass()) != NULL) {
+      clean_klass(klass);
+    }
+  }
+};
+
+// To minimize the remark pause times, the tasks below are done in parallel.
+class G1ParallelCleaningTask : public AbstractGangTask {
+private:
+  G1StringSymbolTableUnlinkTask _string_symbol_task;
+  G1CodeCacheUnloadingTask _code_cache_task;
+  G1KlassCleaningTask _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
+  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
+      AbstractGangTask("Parallel Cleaning"),
+      _string_symbol_task(is_alive, process_strings, process_symbols),
+      _code_cache_task(num_workers, is_alive, unloading_occurred),
+      _klass_cleaning_task(is_alive) {
+  }
+
+  void pre_work_verification() {
+    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
+    assert(Thread::current()->is_VM_thread()
+           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  void post_work_verification() {
+    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  // The parallel work done by all worker threads.
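barrier_mark()/barrier_wait() above form a one-shot barrier between the two cleaning passes; the same behavior modeled with a mutex and condition variable (PassBarrier is an invented name):

#include <condition_variable>
#include <mutex>

class PassBarrier {
  std::mutex              _lock;
  std::condition_variable _cv;
  unsigned                _entered = 0;
  const unsigned          _workers;
 public:
  explicit PassBarrier(unsigned workers) : _workers(workers) {}
  // Every worker marks once its first pass is done; the last one wakes waiters.
  void mark() {
    std::lock_guard<std::mutex> g(_lock);
    if (++_entered == _workers) _cv.notify_all();
  }
  // Sleep until all workers have marked.
  void wait() {
    std::unique_lock<std::mutex> l(_lock);
    _cv.wait(l, [this] { return _entered >= _workers; });
  }
};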
+  void work(uint worker_id) {
+    pre_work_verification();
+
+    // Do first pass of code cache cleaning.
+    _code_cache_task.work_first_pass(worker_id);
+
+    // Let the threads mark that the first pass is done.
+    _code_cache_task.barrier_mark(worker_id);
+
+    // Clean the Strings and Symbols.
+    _string_symbol_task.work(worker_id);
+
+    // Wait for all workers to finish the first code cache cleaning pass.
+    _code_cache_task.barrier_wait(worker_id);
+
+    // Do the second code cache cleaning work, which relies on
+    // the liveness information gathered during the first pass.
+    _code_cache_task.work_second_pass(worker_id);
+
+    // Clean all klasses that were not unloaded.
+    _klass_cleaning_task.work();
+
+    post_work_verification();
+  }
+};
+
+
+void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
+                                        bool process_strings,
+                                        bool process_symbols,
+                                        bool class_unloading_occurred) {
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
-
-  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+                    workers()->active_workers() : 1);
+
+  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
+                                        n_workers, class_unloading_occurred);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
@@ -5294,12 +5281,21 @@
   } else {
     g1_unlink_task.work(0);
   }
-  if (G1TraceStringSymbolTableScrubbing) {
-    gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
-                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+}
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  {
+    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+ _g1h->workers()->active_workers() : 1); + G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); + if (G1CollectedHeap::use_parallel_gc_threads()) { + set_par_threads(n_workers); + workers()->run_task(&g1_unlink_task); + set_par_threads(0); + } else { + g1_unlink_task.work(0); + } } if (G1StringDedup::is_enabled()) { @@ -5307,21 +5303,43 @@ } } -class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { -public: - bool do_card_ptr(jbyte* card_ptr, uint worker_i) { - *card_ptr = CardTableModRefBS::dirty_card_val(); - return true; +class G1RedirtyLoggedCardsTask : public AbstractGangTask { + private: + DirtyCardQueueSet* _queue; + public: + G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { } + + virtual void work(uint worker_id) { + double start_time = os::elapsedTime(); + + RedirtyLoggedCardTableEntryClosure cl; + if (G1CollectedHeap::heap()->use_parallel_gc_threads()) { + _queue->par_apply_closure_to_all_completed_buffers(&cl); + } else { + _queue->apply_closure_to_all_completed_buffers(&cl); + } + + G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times(); + timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0); + timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed()); } }; void G1CollectedHeap::redirty_logged_cards() { - guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates."); double redirty_logged_cards_start = os::elapsedTime(); - RedirtyLoggedCardTableEntryFastClosure redirty; - dirty_card_queue_set().set_closure(&redirty); - dirty_card_queue_set().apply_closure_to_all_completed_buffers(); + uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 
+ _g1h->workers()->active_workers() : 1); + + G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set()); + dirty_card_queue_set().reset_for_par_iteration(); + if (use_parallel_gc_threads()) { + set_par_threads(n_workers); + workers()->run_task(&redirty_task); + set_par_threads(0); + } else { + redirty_task.work(0); + } DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); dcq.merge_bufferlists(&dirty_card_queue_set()); @@ -5360,12 +5378,22 @@ public: G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } - void do_oop( oop* p) { + void do_oop(oop* p) { oop obj = *p; - - if (_g1->obj_in_cs(obj)) { + assert(obj != NULL, "the caller should have filtered out NULL values"); + + G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj); + if (cset_state == G1CollectedHeap::InNeither) { + return; + } + if (cset_state == G1CollectedHeap::InCSet) { assert( obj->is_forwarded(), "invariant" ); *p = obj->forwardee(); + } else { + assert(!obj->is_forwarded(), "invariant" ); + assert(cset_state == G1CollectedHeap::IsHumongous, + err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state)); + _g1->set_humongous_is_live(obj); } } }; @@ -5378,17 +5406,14 @@ class G1CopyingKeepAliveClosure: public OopClosure { G1CollectedHeap* _g1h; OopClosure* _copy_non_heap_obj_cl; - OopsInHeapRegionClosure* _copy_metadata_obj_cl; G1ParScanThreadState* _par_scan_state; public: G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, OopClosure* non_heap_obj_cl, - OopsInHeapRegionClosure* metadata_obj_cl, G1ParScanThreadState* pss): _g1h(g1h), _copy_non_heap_obj_cl(non_heap_obj_cl), - _copy_metadata_obj_cl(metadata_obj_cl), _par_scan_state(pss) {} @@ -5398,7 +5423,7 @@ template void do_oop_work(T* p) { oop obj = oopDesc::load_decode_heap_oop(p); - if (_g1h->obj_in_cs(obj)) { + if (_g1h->is_in_cset_or_humongous(obj)) { // If the referent object has been forwarded (either copied // to a new location or to itself in the event of an // evacuation failure) then we need to update the reference @@ -5421,12 +5446,12 @@ _par_scan_state->push_on_queue(p); } else { assert(!Metaspace::contains((const void*)p), - err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) " + err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p)); - _copy_non_heap_obj_cl->do_oop(p); - } + _copy_non_heap_obj_cl->do_oop(p); } } + } }; // Serial drain queue closure. Called as the 'complete_gc' @@ -5516,22 +5541,18 @@ pss.set_evac_failure_closure(&evac_failure_cl); G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Keep alive closure. 
- G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss); // Complete GC closure G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator); @@ -5622,22 +5643,17 @@ pss.set_evac_failure_closure(&evac_failure_cl); - assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); - + assert(pss.queue_is_empty(), "both queue and overflow should be empty"); G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Is alive closure @@ -5645,7 +5661,7 @@ // Copying keep alive closure. Applied to referent objects that need // to be copied. - G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss); ReferenceProcessor* rp = _g1h->ref_processor_cm(); @@ -5681,7 +5697,7 @@ G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator); drain_queue.do_void(); // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure - assert(pss.refs()->is_empty(), "should be"); + assert(pss.queue_is_empty(), "should be"); } }; @@ -5748,25 +5764,21 @@ pss.set_evac_failure_closure(&evac_failure_cl); - assert(pss.refs()->is_empty(), "pre-condition"); + assert(pss.queue_is_empty(), "pre-condition"); G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Keep alive closure. - G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss); // Serial Complete GC closure G1STWDrainQueueClosure drain_queue(this, &pss); @@ -5781,7 +5793,8 @@ &keep_alive, &drain_queue, NULL, - _gc_timer_stw); + _gc_timer_stw, + _gc_tracer_stw->gc_id()); } else { // Parallel reference processing assert(rp->num_q() == no_of_gc_workers, "sanity"); @@ -5792,15 +5805,14 @@ &keep_alive, &drain_queue, &par_task_executor, - _gc_timer_stw); + _gc_timer_stw, + _gc_tracer_stw->gc_id()); } _gc_tracer_stw->report_gc_reference_stats(stats); - // We have completed copying any necessary live referent objects - // (that were not copied during the actual pause) so we can - // retire any active alloc buffers - pss.retire_alloc_buffers(); - assert(pss.refs()->is_empty(), "both queue and overflow should be empty"); + + // We have completed copying any necessary live referent objects. 
+ assert(pss.queue_is_empty(), "both queue and overflow should be empty"); double ref_proc_time = os::elapsedTime() - ref_proc_start; g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); @@ -5885,6 +5897,10 @@ { StrongRootsScope srs(this); + // InitialMark needs claim bits to keep track of the marked-through CLDs. + if (g1_policy()->during_initial_mark_pause()) { + ClassLoaderDataGraph::clear_claimed_marks(); + } if (G1CollectedHeap::use_parallel_gc_threads()) { // The individual threads will set their evac-failure closures. @@ -5933,7 +5949,7 @@ } } - release_gc_alloc_regions(n_workers, evacuation_info); + _allocator->release_gc_alloc_regions(n_workers, evacuation_info); g1_rem_set()->cleanup_after_oops_into_collection_set_do(); // Reset and re-enable the hot card cache. @@ -5942,12 +5958,6 @@ hot_card_cache->reset_hot_cache(); hot_card_cache->set_use_cache(true); - // Migrate the strong code roots attached to each region in - // the collection set. Ideally we would like to do this - // after we have finished the scanning/evacuation of the - // strong code roots for a particular heap region. - migrate_strong_code_roots(); - purge_code_root_memory(); if (g1_policy()->during_initial_mark_pause()) { @@ -5975,9 +5985,7 @@ // RSets. enqueue_discovered_references(n_workers); - if (G1DeferredRSUpdate) { - redirty_logged_cards(); - } + redirty_logged_cards(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); } @@ -5985,10 +5993,16 @@ FreeRegionList* free_list, bool par, bool locked) { - assert(!hr->isHumongous(), "this is only for non-humongous regions"); + assert(!hr->is_free(), "the region should not be free"); assert(!hr->is_empty(), "the region should not be empty"); + assert(_hrm.is_available(hr->hrm_index()), "region should be committed"); assert(free_list != NULL, "pre-condition"); + if (G1VerifyBitmaps) { + MemRegion mr(hr->bottom(), hr->end()); + concurrent_mark()->clearRangePrevBitmap(mr); + } + // Clear the card counts for this region. // Note: we only need to do this if the region is not young // (since we don't refine cards in young regions). @@ -6009,14 +6023,14 @@ // We need to read this before we make the region non-humongous, // otherwise the information will be gone. 
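free_humongous_region() above frees a whole run of regions: the starts-humongous region plus every continues-humongous follower up to the saved last index, clearing the humongous tag on each before handing it to the free list. In standalone form (invented types, not HotSpot's HeapRegion/FreeRegionList):

#include <cstddef>
#include <vector>

struct Region {
  bool humongous;
};

// Free the run [start_idx, last_idx): strip the humongous tag (clear_humongous()
// in the code above) and put each region on the free list (free_region()).
void free_humongous_run(std::vector<Region>& regions, size_t start_idx,
                        size_t last_idx, std::vector<size_t>& free_list) {
  for (size_t i = start_idx; i < last_idx; i++) {
    regions[i].humongous = false;
    free_list.push_back(i);
  }
}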
uint last_index = hr->last_hc_index(); - hr->set_notHumongous(); + hr->clear_humongous(); free_region(hr, free_list, par); - uint i = hr->hrs_index() + 1; + uint i = hr->hrm_index() + 1; while (i < last_index) { HeapRegion* curr_hr = region_at(i); assert(curr_hr->continuesHumongous(), "invariant"); - curr_hr->set_notHumongous(); + curr_hr->clear_humongous(); free_region(curr_hr, free_list, par); i += 1; } @@ -6036,15 +6050,12 @@ assert(list != NULL, "list can't be null"); if (!list->is_empty()) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - _free_list.add_ordered(list); + _hrm.insert_list_into_free_list(list); } } void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { - assert(_summary_bytes_used >= bytes, - err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT, - _summary_bytes_used, bytes)); - _summary_bytes_used -= bytes; + _allocator->decrease_used(bytes); } class G1ParCleanupCTTask : public AbstractGangTask { @@ -6123,7 +6134,87 @@ void G1CollectedHeap::verify_dirty_young_regions() { verify_dirty_young_list(_young_list->first_region()); } -#endif + +bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap, + HeapWord* tams, HeapWord* end) { + guarantee(tams <= end, + err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end)); + HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end); + if (result < end) { + gclog_or_tty->cr(); + gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT, + bitmap_name, result); + gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT, + bitmap_name, tams, end); + return false; + } + return true; +} + +bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) { + CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap(); + CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap(); + + HeapWord* bottom = hr->bottom(); + HeapWord* ptams = hr->prev_top_at_mark_start(); + HeapWord* ntams = hr->next_top_at_mark_start(); + HeapWord* end = hr->end(); + + bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end); + + bool res_n = true; + // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window + // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap + // if we happen to be in that state. 
+  if (mark_in_progress() || !_cmThread->in_progress()) {
+    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
+  }
+  if (!res_p || !res_n) {
+    gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
+    gclog_or_tty->print_cr("#### Caller: %s", caller);
+    return false;
+  }
+  return true;
+}
+
+void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
+  if (!G1VerifyBitmaps) return;
+
+  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
+}
+
+class G1VerifyBitmapClosure : public HeapRegionClosure {
+private:
+  const char* _caller;
+  G1CollectedHeap* _g1h;
+  bool _failures;
+
+public:
+  G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
+    _caller(caller), _g1h(g1h), _failures(false) { }
+
+  bool failures() { return _failures; }
+
+  virtual bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) return false;
+
+    bool result = _g1h->verify_bitmaps(_caller, hr);
+    if (!result) {
+      _failures = true;
+    }
+    return false;
+  }
+};
+
+void G1CollectedHeap::check_bitmaps(const char* caller) {
+  if (!G1VerifyBitmaps) return;
+
+  G1VerifyBitmapClosure cl(caller, this);
+  heap_region_iterate(&cl);
+  guarantee(!cl.failures(), "bitmap verification");
+}
+#endif // PRODUCT

 void G1CollectedHeap::cleanUpCardTable() {
   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
@@ -6244,9 +6335,9 @@
       if (cur->is_young()) {
         cur->set_young_index_in_cset(-1);
       }
-      cur->set_not_young();
       cur->set_evacuation_failed(false);
       // The region is now considered to be old.
+      cur->set_old();
       _old_set.add(cur);
       evacuation_info.increment_collectionset_used_after(cur->used());
     }
@@ -6272,6 +6363,154 @@
   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
 }

+class G1FreeHumongousRegionClosure : public HeapRegionClosure {
+ private:
+  FreeRegionList* _free_region_list;
+  HeapRegionSet* _proxy_set;
+  HeapRegionSetCount _humongous_regions_removed;
+  size_t _freed_bytes;
+ public:
+
+  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
+    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    if (!r->startsHumongous()) {
+      return false;
+    }
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    oop obj = (oop)r->bottom();
+    CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
+
+    // The following checks are sufficient to determine whether the humongous
+    // object is live.
+    // The main additional check (in addition to having a reference from the roots
+    // or the young gen) is whether the humongous object has a remembered set entry.
+    //
+    // A humongous object cannot be live if there is no remembered set for it
+    // because:
+    // - there can be no references from within humongous starts regions referencing
+    //   the object because we never allocate other objects into them.
+    //   (I.e. there are no intra-region references that may be missed by the
+    //   remembered set)
+    // - as soon as there is a remembered set entry to the humongous starts region
+    //   (i.e. it has "escaped" to an old object) this remembered set entry will stay
+    //   until the end of a concurrent mark.
+    //
+    // It is not required to check whether the object has been found dead by marking
+    // or not, in fact it would prevent reclamation within a concurrent cycle, as
+    // all objects allocated during that time are considered live.
+    // SATB marking is even more conservative than the remembered set.
+    // So if at this point in the collection there is no remembered set entry,
+    // nobody has a reference to it.
+    // At the start of collection we flush all refinement logs, and remembered sets
+    // are completely up-to-date wrt references to the humongous object.
+    //
+    // Other implementation considerations:
+    // - never consider object arrays: while they are a valid target, they have not
+    //   been observed to be used as temporary objects.
+    // - they would also require considerable effort for cleaning up the remembered
+    //   sets.
+    // While this cleanup is not strictly necessary to be done (or done instantly),
+    // given that their occurrence is very low, this saves us this additional
+    // complexity.
+    uint region_idx = r->hrm_index();
+    if (g1h->humongous_is_live(region_idx) ||
+        g1h->humongous_region_is_always_live(region_idx)) {
+
+      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                               r->isHumongous(),
+                               region_idx,
+                               r->rem_set()->occupied(),
+                               r->rem_set()->strong_code_roots_list_length(),
+                               next_bitmap->isMarked(r->bottom()),
+                               g1h->humongous_is_live(region_idx),
+                               obj->is_objArray()
+                              );
+      }
+
+      return false;
+    }
+
+    guarantee(!obj->is_objArray(),
+              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
+                      r->bottom()));
+
+    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                             r->isHumongous(),
+                             r->bottom(),
+                             region_idx,
+                             r->region_num(),
+                             r->rem_set()->occupied(),
+                             r->rem_set()->strong_code_roots_list_length(),
+                             next_bitmap->isMarked(r->bottom()),
+                             g1h->humongous_is_live(region_idx),
+                             obj->is_objArray()
+                            );
+    }
+    // Need to clear mark bit of the humongous object if already set.
+ if (next_bitmap->isMarked(r->bottom())) { + next_bitmap->clear(r->bottom()); + } + _freed_bytes += r->used(); + r->set_containing_set(NULL); + _humongous_regions_removed.increment(1u, r->capacity()); + g1h->free_humongous_region(r, _free_region_list, false); + + return false; + } + + HeapRegionSetCount& humongous_free_count() { + return _humongous_regions_removed; + } + + size_t bytes_freed() const { + return _freed_bytes; + } + + size_t humongous_reclaimed() const { + return _humongous_regions_removed.length(); + } +}; + +void G1CollectedHeap::eagerly_reclaim_humongous_regions() { + assert_at_safepoint(true); + + if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) { + g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); + return; + } + + double start_time = os::elapsedTime(); + + FreeRegionList local_cleanup_list("Local Humongous Cleanup List"); + + G1FreeHumongousRegionClosure cl(&local_cleanup_list); + heap_region_iterate(&cl); + + HeapRegionSetCount empty_set; + remove_from_old_sets(empty_set, cl.humongous_free_count()); + + G1HRPrinter* hr_printer = _g1h->hr_printer(); + if (hr_printer->is_active()) { + FreeRegionListIterator iter(&local_cleanup_list); + while (iter.more_available()) { + HeapRegion* hr = iter.get_next(); + hr_printer->cleanup(hr); + } + } + + prepend_to_freelist(&local_cleanup_list); + decrement_summary_bytes(cl.bytes_freed()); + + g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, + cl.humongous_reclaimed()); +} + // This routine is similar to the above but does not record // any policy statistics or update free lists; we are abandoning // the current incremental collection set in preparation of a @@ -6385,16 +6624,15 @@ TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { } bool doHeapRegion(HeapRegion* r) { - if (r->is_empty()) { - // We ignore empty regions, we'll empty the free list afterwards - } else if (r->is_young()) { - // We ignore young regions, we'll empty the young list afterwards - } else if (r->isHumongous()) { + if (r->is_old()) { + _old_set->remove(r); + } else { + // We ignore free regions, we'll empty the free list afterwards. + // We ignore young regions, we'll empty the young list afterwards. // We ignore humongous regions, we're not tearing down the - // humongous region set - } else { - // The rest should be old - _old_set->remove(r); + // humongous regions set. + assert(r->is_free() || r->is_young() || r->isHumongous(), + "it cannot be another type"); } return false; } @@ -6416,22 +6654,22 @@ // this is that during a full GC string deduplication needs to know if // a collected region was young or old when the full GC was initiated. 
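Taken together, the closure and driver above reduce to a predicate plus one heap pass with batched accounting. A standalone model (invented types; the predicate compresses the liveness rules from the comment: empty remembered set, no strong code roots, not an object array):

#include <cstddef>
#include <vector>

struct HumongousRegion {
  bool   starts_humongous;
  bool   remset_empty;
  bool   has_strong_code_roots;
  bool   is_obj_array;
  size_t used_bytes;
  bool   freed;
};

bool is_reclaimable(const HumongousRegion& r) {
  return r.starts_humongous && r.remset_empty &&
         !r.has_strong_code_roots && !r.is_obj_array;
}

// One pass over the heap; accounting is updated once at the end, mirroring
// prepend_to_freelist() followed by decrement_summary_bytes().
size_t eagerly_reclaim(std::vector<HumongousRegion>& heap) {
  size_t freed_bytes = 0;
  for (HumongousRegion& r : heap) {
    if (is_reclaimable(r)) {
      r.freed = true;
      freed_bytes += r.used_bytes;
    }
  }
  return freed_bytes;
}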
} - _free_list.remove_all(); + _hrm.remove_all_free_regions(); } class RebuildRegionSetsClosure : public HeapRegionClosure { private: bool _free_list_only; HeapRegionSet* _old_set; - FreeRegionList* _free_list; + HeapRegionManager* _hrm; size_t _total_used; public: RebuildRegionSetsClosure(bool free_list_only, - HeapRegionSet* old_set, FreeRegionList* free_list) : + HeapRegionSet* old_set, HeapRegionManager* hrm) : _free_list_only(free_list_only), - _old_set(old_set), _free_list(free_list), _total_used(0) { - assert(_free_list->is_empty(), "pre-condition"); + _old_set(old_set), _hrm(hrm), _total_used(0) { + assert(_hrm->num_free_regions() == 0, "pre-condition"); if (!free_list_only) { assert(_old_set->is_empty(), "pre-condition"); } @@ -6444,14 +6682,20 @@ if (r->is_empty()) { // Add free regions to the free list - _free_list->add_as_tail(r); + r->set_free(); + r->set_allocation_context(AllocationContext::system()); + _hrm->insert_into_free_list(r); } else if (!_free_list_only) { assert(!r->is_young(), "we should not come across young regions"); if (r->isHumongous()) { // We ignore humongous regions, we left the humongous set unchanged } else { - // The rest should be old, add them to the old set + // Objects that were compacted would have ended up on regions + // that were previously old or free. + assert(r->is_free() || r->is_old(), "invariant"); + // We now consider them old, so register as such. + r->set_old(); _old_set->add(r); } _total_used += r->used(); @@ -6472,16 +6716,16 @@ _young_list->empty_list(); } - RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); + RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); heap_region_iterate(&cl); if (!free_list_only) { - _summary_bytes_used = cl.total_used(); - } - assert(_summary_bytes_used == recalculate_used(), - err_msg("inconsistent _summary_bytes_used, " + _allocator->set_used(cl.total_used()); + } + assert(_allocator->used_unlocked() == recalculate_used(), + err_msg("inconsistent _allocator->used_unlocked(), " "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT, - _summary_bytes_used, recalculate_used())); + _allocator->used_unlocked(), recalculate_used())); } void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { @@ -6490,11 +6734,7 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const { HeapRegion* hr = heap_region_containing(p); - if (hr == NULL) { - return false; - } else { - return hr->is_in(p); - } + return hr->is_in(p); } // Methods for the mutator alloc region @@ -6512,6 +6752,7 @@ if (new_alloc_region != NULL) { set_region_short_lived_locked(new_alloc_region); _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); + check_bitmaps("Mutator Region Allocation", new_alloc_region); return new_alloc_region; } } @@ -6521,10 +6762,10 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes) { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(alloc_region->is_young(), "all mutator alloc regions should be young"); + assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); - _summary_bytes_used += allocated_bytes; + _allocator->increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its @@ -6532,11 +6773,6 @@ g1mm()->update_eden_size(); } -HeapRegion* 
MutatorAllocRegion::allocate_new_region(size_t word_size, - bool force) { - return _g1h->new_mutator_alloc_region(word_size, force); -} - void G1CollectedHeap::set_par_threads() { // Don't change the number of workers. Use the value previously set // in the workgroup. @@ -6553,11 +6789,6 @@ set_par_threads(n_workers); } -void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, - size_t allocated_bytes) { - _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); -} - // Methods for the GC alloc regions HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, @@ -6574,12 +6805,15 @@ // We really only need to do this for old regions given that we // should never scan survivors. But it doesn't hurt to do it // for survivors too. - new_alloc_region->set_saved_mark(); + new_alloc_region->record_top_and_timestamp(); if (survivor) { new_alloc_region->set_survivor(); _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor); + check_bitmaps("Survivor Region Allocation", new_alloc_region); } else { + new_alloc_region->set_old(); _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old); + check_bitmaps("Old Region Allocation", new_alloc_region); } bool during_im = g1_policy()->during_initial_mark_pause(); new_alloc_region->note_start_of_copying(during_im); @@ -6605,65 +6839,13 @@ _hr_printer.retire(alloc_region); } -HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size, - bool force) { - assert(!force, "not supported for GC alloc regions"); - return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived); -} - -void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region, - size_t allocated_bytes) { - _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, - GCAllocForSurvived); -} - -HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size, - bool force) { - assert(!force, "not supported for GC alloc regions"); - return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured); -} - -void OldGCAllocRegion::retire_region(HeapRegion* alloc_region, - size_t allocated_bytes) { - _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, - GCAllocForTenured); -} - -HeapRegion* OldGCAllocRegion::release() { - HeapRegion* cur = get(); - if (cur != NULL) { - // Determine how far we are from the next card boundary. If it is smaller than - // the minimum object size we can allocate into, expand into the next card. - HeapWord* top = cur->top(); - HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes); - - size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize); - - if (to_allocate_words != 0) { - // We are not at a card boundary. Fill up, possibly into the next, taking the - // end of the region and the minimum object size into account. - to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize), - MAX2(to_allocate_words, G1CollectedHeap::min_fill_size())); - - // Skip allocation if there is not enough space to allocate even the smallest - // possible object. In this case this region will not be retained, so the - // original problem cannot occur. 
- if (to_allocate_words >= G1CollectedHeap::min_fill_size()) { - HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */); - CollectedHeap::fill_with_object(dummy, to_allocate_words); - } - } - } - return G1AllocRegion::release(); -} - // Heap region set verification class VerifyRegionListsClosure : public HeapRegionClosure { private: HeapRegionSet* _old_set; HeapRegionSet* _humongous_set; - FreeRegionList* _free_list; + HeapRegionManager* _hrm; public: HeapRegionSetCount _old_count; @@ -6672,8 +6854,8 @@ VerifyRegionListsClosure(HeapRegionSet* old_set, HeapRegionSet* humongous_set, - FreeRegionList* free_list) : - _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list), + HeapRegionManager* hrm) : + _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), _old_count(), _humongous_count(), _free_count(){ } bool doHeapRegion(HeapRegion* hr) { @@ -6684,19 +6866,21 @@ if (hr->is_young()) { // TODO } else if (hr->startsHumongous()) { - assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num())); + assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index())); _humongous_count.increment(1u, hr->capacity()); } else if (hr->is_empty()) { - assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num())); + assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index())); _free_count.increment(1u, hr->capacity()); + } else if (hr->is_old()) { + assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index())); + _old_count.increment(1u, hr->capacity()); } else { - assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num())); - _old_count.increment(1u, hr->capacity()); + ShouldNotReachHere(); } return false; } - void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) { + void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length())); guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, old_set->total_capacity_bytes(), _old_count.capacity())); @@ -6705,26 +6889,17 @@ guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, humongous_set->total_capacity_bytes(), _humongous_count.capacity())); - guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length())); + guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length())); guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. 
Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, free_list->total_capacity_bytes(), _free_count.capacity())); } }; -HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index, - HeapWord* bottom) { - HeapWord* end = bottom + HeapRegion::GrainWords; - MemRegion mr(bottom, end); - assert(_g1_reserved.contains(mr), "invariant"); - // This might return NULL if the allocation fails - return new HeapRegion(hrs_index, _bot_shared, mr); -} - void G1CollectedHeap::verify_region_sets() { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); // First, check the explicit lists. - _free_list.verify_list(); + _hrm.verify(); { // Given that a concurrent operation might be adding regions to // the secondary free list we have to take the lock before @@ -6755,9 +6930,9 @@ // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap. - VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); + VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm); heap_region_iterate(&cl); - cl.verify_counts(&_old_set, &_humongous_set, &_free_list); + cl.verify_counts(&_old_set, &_humongous_set, &_hrm); } // Optimized nmethod scanning @@ -6776,13 +6951,8 @@ " starting at "HR_FORMAT, _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); - // HeapRegion::add_strong_code_root() avoids adding duplicate - // entries but having duplicates is OK since we "mark" nmethods - // as visited when we scan the strong code root lists during the GC. - hr->add_strong_code_root(_nm); - assert(hr->rem_set()->strong_code_roots_list_contains(_nm), - err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT, - _nm, HR_FORMAT_PARAMS(hr))); + // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries. + hr->add_strong_code_root_locked(_nm); } } @@ -6809,9 +6979,6 @@ _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); hr->remove_strong_code_root(_nm); - assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), - err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT, - _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6839,132 +7006,13 @@ nm->oops_do(®_cl, true); } -class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure { -public: - bool doHeapRegion(HeapRegion *hr) { - assert(!hr->isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to collection set", - HR_FORMAT_PARAMS(hr))); - hr->migrate_strong_code_roots(); - return false; - } -}; - -void G1CollectedHeap::migrate_strong_code_roots() { - MigrateCodeRootsHeapRegionClosure cl; - double migrate_start = os::elapsedTime(); - collection_set_iterate(&cl); - double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0; - g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms); -} - void G1CollectedHeap::purge_code_root_memory() { double purge_start = os::elapsedTime(); - G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent); + G1CodeRootSet::purge(); double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); } -// Mark all the code roots that point into regions *not* in the -// collection set. -// -// Note we do not want to use a "marking" CodeBlobToOopClosure while -// walking the the code roots lists of regions not in the collection -// set. 
Suppose we have an nmethod (M) that points to objects in two -// separate regions - one in the collection set (R1) and one not (R2). -// Using a "marking" CodeBlobToOopClosure here would result in "marking" -// nmethod M when walking the code roots for R1. When we come to scan -// the code roots for R2, we would see that M is already marked and it -// would be skipped and the objects in R2 that are referenced from M -// would not be evacuated. - -class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure { - - class MarkStrongCodeRootOopClosure: public OopClosure { - ConcurrentMark* _cm; - HeapRegion* _hr; - uint _worker_id; - - template void do_oop_work(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - // Only mark objects in the region (which is assumed - // to be not in the collection set). - if (_hr->is_in(obj)) { - _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); - } - } - } - - public: - MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) : - _cm(cm), _hr(hr), _worker_id(worker_id) { - assert(!_hr->in_collection_set(), "sanity"); - } - - void do_oop(narrowOop* p) { do_oop_work(p); } - void do_oop(oop* p) { do_oop_work(p); } - }; - - MarkStrongCodeRootOopClosure _oop_cl; - -public: - MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id): - _oop_cl(cm, hr, worker_id) {} - - void do_code_blob(CodeBlob* cb) { - nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null(); - if (nm != NULL) { - nm->oops_do(&_oop_cl); - } - } -}; - -class MarkStrongCodeRootsHRClosure: public HeapRegionClosure { - G1CollectedHeap* _g1h; - uint _worker_id; - -public: - MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) : - _g1h(g1h), _worker_id(worker_id) {} - - bool doHeapRegion(HeapRegion *hr) { - HeapRegionRemSet* hrrs = hr->rem_set(); - if (hr->continuesHumongous()) { - // Code roots should never be attached to a continuation of a humongous region - assert(hrrs->strong_code_roots_list_length() == 0, - err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT - " starting at "HR_FORMAT", but has "SIZE_FORMAT, - HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()), - hrrs->strong_code_roots_list_length())); - return false; - } - - if (hr->in_collection_set()) { - // Don't mark code roots into regions in the collection set here. - // They will be marked when we scan them. 
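The comment block being deleted above motivates the per-region code-root walk with an nmethod M referenced from two regions, R1 (in the collection set) and R2 (not). A toy model of that hazard, with hypothetical Nmethod/Region types: a visited set that is global to the heap, which is roughly what a "marking" CodeBlobToOopClosure would give, performs one walk where correctness needs two.

#include <cstdio>
#include <set>
#include <vector>

struct Nmethod { const char* name; };

struct Region {
  bool                  in_cset;
  std::vector<Nmethod*> code_roots;
};

// With a heap-global "visited" set, nmethod M is processed for R1 only;
// the walk for R2 skips it, so objects in R2 referenced from M would
// never be grayed.
static int walks_performed(const std::vector<Region>& regions) {
  std::set<Nmethod*> visited;
  int walks = 0;
  for (const Region& r : regions) {
    for (Nmethod* nm : r.code_roots) {
      if (visited.insert(nm).second) {
        walks++;  // nm->oops_do(...) would run here, once globally
      }
    }
  }
  return walks;
}

int main() {
  Nmethod m = { "M" };
  std::vector<Region> rs = { { true, { &m } }, { false, { &m } } };
  // Prints 1, although correctness needs one walk per region (2).
  std::printf("walks: %d\n", walks_performed(rs));
  return 0;
}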
- return false; - } - - MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id); - hr->strong_code_roots_do(&cb_cl); - return false; - } -}; - -void G1CollectedHeap::mark_strong_code_roots(uint worker_id) { - MarkStrongCodeRootsHRClosure cl(this, worker_id); - if (G1CollectedHeap::use_parallel_gc_threads()) { - heap_region_par_iterate_chunked(&cl, - worker_id, - workers()->active_workers(), - HeapRegion::ParMarkRootClaimValue); - } else { - heap_region_iterate(&cl); - } -} - class RebuildStrongCodeRootClosure: public CodeBlobClosure { G1CollectedHeap* _g1h; @@ -6978,7 +7026,7 @@ return; } - if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) { + if (ScavengeRootsInCode) { _g1h->register_nmethod(nm); } } --- ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,4 +1,4 @@ -/* + /* * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -25,15 +25,17 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP +#include "gc_implementation/g1/g1AllocationContext.hpp" +#include "gc_implementation/g1/g1Allocator.hpp" #include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/evacuationInfo.hpp" #include "gc_implementation/g1/g1AllocRegion.hpp" +#include "gc_implementation/g1/g1BiasedArray.hpp" #include "gc_implementation/g1/g1HRPrinter.hpp" #include "gc_implementation/g1/g1MonitoringSupport.hpp" -#include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" -#include "gc_implementation/g1/heapRegionSeq.hpp" +#include "gc_implementation/g1/heapRegionManager.hpp" #include "gc_implementation/g1/heapRegionSet.hpp" #include "gc_implementation/shared/hSpaceCounters.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp" @@ -80,12 +82,6 @@ typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) -enum GCAllocPurpose { - GCAllocForTenured, - GCAllocForSurvived, - GCAllocPurposeCount -}; - class YoungList : public CHeapObj { private: G1CollectedHeap* _g1h; @@ -158,40 +154,6 @@ void print(); }; -class MutatorAllocRegion : public G1AllocRegion { -protected: - virtual HeapRegion* allocate_new_region(size_t word_size, bool force); - virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); -public: - MutatorAllocRegion() - : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } -}; - -class SurvivorGCAllocRegion : public G1AllocRegion { -protected: - virtual HeapRegion* allocate_new_region(size_t word_size, bool force); - virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); -public: - SurvivorGCAllocRegion() - : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { } -}; - -class OldGCAllocRegion : public G1AllocRegion { -protected: - virtual HeapRegion* allocate_new_region(size_t word_size, bool force); - virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); -public: - OldGCAllocRegion() - : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } - - // This specialization of release() makes sure that the last card that has been - // allocated into has been completely filled by a dummy object. 
- // This avoids races when remembered set scanning wants to update the BOT of the - // last card in the retained old gc alloc region, and allocation threads - // allocating into that card at the same time. - virtual HeapRegion* release(); -}; - // The G1 STW is alive closure. // An instance is embedded into the G1CH and used as the // (optional) _is_alive_non_header closure in the STW @@ -206,7 +168,15 @@ class RefineCardTableEntryClosure; +class G1RegionMappingChangedListener : public G1MappingChangedListener { + private: + void reset_from_card_cache(uint start_idx, size_t num_regions); + public: + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); +}; + class G1CollectedHeap : public SharedHeap { + friend class VM_CollectForMetadataAllocation; friend class VM_G1CollectForAllocation; friend class VM_G1CollectFull; friend class VM_G1IncCollectionPause; @@ -214,9 +184,12 @@ friend class MutatorAllocRegion; friend class SurvivorGCAllocRegion; friend class OldGCAllocRegion; + friend class G1Allocator; + friend class G1DefaultAllocator; + friend class G1ResManAllocator; // Closures used in implementation. - template + template friend class G1ParCopyClosure; friend class G1IsAliveClosure; friend class G1EvacuateFollowersClosure; @@ -224,6 +197,8 @@ friend class G1ParScanClosureSuper; friend class G1ParEvacuateFollowersClosure; friend class G1ParTask; + friend class G1ParGCAllocator; + friend class G1DefaultParGCAllocator; friend class G1FreeGarbageRegionClosure; friend class RefineCardTableEntryClosure; friend class G1PrepareCompactClosure; @@ -233,6 +208,7 @@ friend class EvacPopObjClosure; friend class G1ParCleanupCTTask; + friend class G1FreeHumongousRegionClosure; // Other related classes. friend class G1MarkSweep; @@ -242,19 +218,9 @@ static size_t _humongous_object_threshold_in_words; - // Storage for the G1 heap. - VirtualSpace _g1_storage; - MemRegion _g1_reserved; - - // The part of _g1_storage that is currently committed. - MemRegion _g1_committed; - - // The master free list. It will satisfy all new region allocations. - FreeRegionList _free_list; - // The secondary free list which contains regions that have been - // freed up during the cleanup process. This will be appended to the - // master free list when appropriate. + // freed up during the cleanup process. This will be appended to + // the master free list when appropriate. FreeRegionList _secondary_free_list; // It keeps track of the old regions. @@ -263,6 +229,9 @@ // It keeps track of the humongous regions. HeapRegionSet _humongous_set; + void clear_humongous_is_live_table(); + void eagerly_reclaim_humongous_regions(); + // The number of regions we could create by expansion. uint _expansion_regions; @@ -285,47 +254,24 @@ // after heap shrinking (free_list_only == true). void rebuild_region_sets(bool free_list_only); + // Callback for region mapping changed events. + G1RegionMappingChangedListener _listener; + // The sequence of all heap regions in the heap. - HeapRegionSeq _hrs; + HeapRegionManager _hrm; - // Alloc region used to satisfy mutator allocation requests. - MutatorAllocRegion _mutator_alloc_region; + // Class that handles the different kinds of allocations. + G1Allocator* _allocator; - // Alloc region used to satisfy allocation requests by the GC for - // survivor objects. - SurvivorGCAllocRegion _survivor_gc_alloc_region; + // Statistics for each allocation context + AllocationContextStats _allocation_context_stats; // PLAB sizing policy for survivors. 
PLABStats _survivor_plab_stats; - // Alloc region used to satisfy allocation requests by the GC for - // old objects. - OldGCAllocRegion _old_gc_alloc_region; - // PLAB sizing policy for tenured objects. PLABStats _old_plab_stats; - PLABStats* stats_for_purpose(GCAllocPurpose purpose) { - PLABStats* stats = NULL; - - switch (purpose) { - case GCAllocForSurvived: - stats = &_survivor_plab_stats; - break; - case GCAllocForTenured: - stats = &_old_plab_stats; - break; - default: - assert(false, "unrecognized GCAllocPurpose"); - } - - return stats; - } - - // The last old region we allocated to during the last GC. - // Typically, it is not full so we should re-use it during the next GC. - HeapRegion* _retained_old_gc_alloc_region; - // It specifies whether we should attempt to expand the heap after a // region allocation failure. If heap expansion fails we set this to // false so that we don't re-attempt the heap expansion (it's likely @@ -353,33 +299,25 @@ // Helper for monitoring and management support. G1MonitoringSupport* _g1mm; - // Determines PLAB size for a particular allocation purpose. - size_t desired_plab_sz(GCAllocPurpose purpose); + // Records whether the region at the given index is kept live by roots or + // references from the young generation. + class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray { + protected: + bool default_value() const { return false; } + public: + void clear() { G1BiasedMappedArray::clear(); } + void set_live(uint region) { + set_by_index(region, true); + } + bool is_live(uint region) { + return get_by_index(region); + } + }; - // Outside of GC pauses, the number of bytes used in all regions other - // than the current allocation region. - size_t _summary_bytes_used; - - // This is used for a quick test on whether a reference points into - // the collection set or not. Basically, we have an array, with one - // byte per region, and that byte denotes whether the corresponding - // region is in the collection set or not. The entry corresponding - // the bottom of the heap, i.e., region 0, is pointed to by - // _in_cset_fast_test_base. The _in_cset_fast_test field has been - // biased so that it actually points to address 0 of the address - // space, to make the test as fast as possible (we can simply shift - // the address to address into it, instead of having to subtract the - // bottom of the heap from the address before shifting it; basically - // it works in the same way the card table works). - bool* _in_cset_fast_test; - - // The allocated array used for the fast test on whether a reference - // points into the collection set or not. This field is also used to - // free the array. - bool* _in_cset_fast_test_base; - - // The length of the _in_cset_fast_test_base array. - uint _in_cset_fast_test_length; + HumongousIsLiveBiasedMappedArray _humongous_is_live; + // Stores whether during humongous object registration we found candidate regions. + // If not, we can skip a few steps. + bool _has_humongous_reclaim_candidates; volatile unsigned _gc_time_stamp; @@ -422,7 +360,7 @@ // If the HR printer is active, dump the state of the regions in the // heap after a compaction. - void print_hrs_post_compaction(); + void print_hrm_post_compaction(); double verify(bool guard, const char* msg); void verify_before_gc(); @@ -513,24 +451,17 @@ // humongous object, set is_old to true. If not, to false. 
HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); - // Attempt to satisfy a humongous allocation request of the given - // size by finding a contiguous set of free regions of num_regions - // length and remove them from the master free list. Return the - // index of the first region or G1_NULL_HRS_INDEX if the search - // was unsuccessful. - uint humongous_obj_allocate_find_first(uint num_regions, - size_t word_size); - // Initialize a contiguous set of free regions of length num_regions // and starting at index first so that they appear as a single // humongous region. HeapWord* humongous_obj_allocate_initialize_regions(uint first, uint num_regions, - size_t word_size); + size_t word_size, + AllocationContext_t context); // Attempt to allocate a humongous object of the given size. Return // NULL if unsuccessful. - HeapWord* humongous_obj_allocate(size_t word_size); + HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context); // The following two methods, allocate_new_tlab() and // mem_allocate(), are the two main entry points from the runtime @@ -586,6 +517,7 @@ // retry the allocation attempt, potentially scheduling a GC // pause. This should only be used for non-humongous allocations. HeapWord* attempt_allocation_slow(size_t word_size, + AllocationContext_t context, unsigned int* gc_count_before_ret, int* gclocker_retry_count_ret); @@ -600,7 +532,8 @@ // specifies whether the mutator alloc region is expected to be NULL // or not. HeapWord* attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_mutator_alloc_region); + AllocationContext_t context, + bool expect_null_mutator_alloc_region); // It dirties the cards that cover the block so that so that the post // write barrier never queues anything when updating objects on this @@ -612,7 +545,9 @@ // allocation region, either by picking one or expanding the // heap, and then allocate a block of the given size. The block // may not be a humongous - it must fit into a single heap region. - HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); + HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, + size_t word_size, + AllocationContext_t context); HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, HeapRegion* alloc_region, @@ -624,10 +559,12 @@ void par_allocate_remaining_space(HeapRegion* r); // Allocation attempt during GC for a survivor object / PLAB. - inline HeapWord* survivor_attempt_allocation(size_t word_size); + inline HeapWord* survivor_attempt_allocation(size_t word_size, + AllocationContext_t context); // Allocation attempt during GC for an old object / PLAB. - inline HeapWord* old_attempt_allocation(size_t word_size); + inline HeapWord* old_attempt_allocation(size_t word_size, + AllocationContext_t context); // These methods are the "callbacks" from the G1AllocRegion class. @@ -666,13 +603,15 @@ // Callback from VM_G1CollectForAllocation operation. // This function does everything necessary/possible to satisfy a // failed allocation request (including collection, expansion, etc.) - HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded); + HeapWord* satisfy_failed_allocation(size_t word_size, + AllocationContext_t context, + bool* succeeded); // Attempting to expand the heap sufficiently // to support an allocation of the given "word_size". If // successful, perform the allocation and return the address of the // allocated block, or else "NULL". 
- HeapWord* expand_and_allocate(size_t word_size); + HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context); // Process any reference objects discovered during // an incremental evacuation pause. @@ -684,6 +623,10 @@ public: + G1Allocator* allocator() { + return _allocator; + } + G1MonitoringSupport* g1mm() { assert(_g1mm != NULL, "should have been initialized"); return _g1mm; @@ -695,19 +638,51 @@ // (Rounds up to a HeapRegion boundary.) bool expand(size_t expand_bytes); + // Returns the PLAB statistics given a purpose. + PLABStats* stats_for_purpose(GCAllocPurpose purpose) { + PLABStats* stats = NULL; + + switch (purpose) { + case GCAllocForSurvived: + stats = &_survivor_plab_stats; + break; + case GCAllocForTenured: + stats = &_old_plab_stats; + break; + default: + assert(false, "unrecognized GCAllocPurpose"); + } + + return stats; + } + + // Determines PLAB size for a particular allocation purpose. + size_t desired_plab_sz(GCAllocPurpose purpose); + + inline AllocationContextStats& allocation_context_stats(); + // Do anything common to GC's. virtual void gc_prologue(bool full); virtual void gc_epilogue(bool full); + inline void set_humongous_is_live(oop obj); + + bool humongous_is_live(uint region) { + return _humongous_is_live.is_live(region); + } + + // Returns whether the given region (which must be a humongous (start) region) + // is to be considered conservatively live regardless of any other conditions. + bool humongous_region_is_always_live(uint index); + // Register the given region to be part of the collection set. + inline void register_humongous_region_with_in_cset_fast_test(uint index); + // Register regions with humongous objects (actually on the start region) in + // the in_cset_fast_test table. + void register_humongous_regions_with_in_cset_fast_test(); // We register a region with the fast "in collection set" test. We // simply set to true the array slot corresponding to this region. void register_region_with_in_cset_fast_test(HeapRegion* r) { - assert(_in_cset_fast_test_base != NULL, "sanity"); - assert(r->in_collection_set(), "invariant"); - uint index = r->hrs_index(); - assert(index < _in_cset_fast_test_length, "invariant"); - assert(!_in_cset_fast_test_base[index], "invariant"); - _in_cset_fast_test_base[index] = true; + _in_cset_fast_test.set_in_cset(r->hrm_index()); } // This is a fast test on whether a reference points into the @@ -716,9 +691,7 @@ inline bool in_cset_fast_test(oop obj); void clear_cset_fast_test() { - assert(_in_cset_fast_test_base != NULL, "sanity"); - memset(_in_cset_fast_test_base, false, - (size_t) _in_cset_fast_test_length * sizeof(bool)); + _in_cset_fast_test.clear(); } // This is called at the start of either a concurrent cycle or a Full @@ -847,22 +820,13 @@ // param is for use with parallel roots processing, and should be // the "i" of the calling parallel worker thread's work(i) function. // In the sequential case this param will be ignored. - void g1_process_strong_roots(bool is_scavenging, - ScanningOption so, - OopClosure* scan_non_heap_roots, - OopsInHeapRegionClosure* scan_rs, - G1KlassScanClosure* scan_klasses, - uint worker_i); - - // Apply "blk" to all the weak roots of the system. These include - // JNI weak roots, the code cache, system dictionary, symbol table, - // string table, and referents of reachable weak refs. - void g1_process_weak_roots(OopClosure* root_closure); - - // Notifies all the necessary spaces that the committed space has - // been updated (either expanded or shrunk). 
It should be called - // after _g1_storage is updated. - void update_committed_space(HeapWord* old_end, HeapWord* new_end); + void g1_process_roots(OopClosure* scan_non_heap_roots, + OopClosure* scan_non_heap_weak_roots, + OopsInHeapRegionClosure* scan_rs, + CLDClosure* scan_strong_clds, + CLDClosure* scan_weak_clds, + CodeBlobClosure* scan_strong_code, + uint worker_i); // The concurrent marker (and the thread it runs in.) ConcurrentMark* _cm; @@ -1050,7 +1014,7 @@ // of G1CollectedHeap::_gc_time_stamp. unsigned int* _worker_cset_start_region_time_stamp; - enum G1H_process_strong_roots_tasks { + enum G1H_process_roots_tasks { G1H_PS_filter_satb_buffers, G1H_PS_refProcessor_oops_do, // Leave this one last. @@ -1131,20 +1095,11 @@ return _gc_time_stamp; } - void reset_gc_time_stamp() { - _gc_time_stamp = 0; - OrderAccess::fence(); - // Clear the cached CSet starting regions and time stamps. - // Their validity is dependent on the GC timestamp. - clear_cset_start_regions(); - } + inline void reset_gc_time_stamp(); void check_gc_time_stamps() PRODUCT_RETURN; - void increment_gc_time_stamp() { - ++_gc_time_stamp; - OrderAccess::fence(); - } + inline void increment_gc_time_stamp(); // Reset the given region's GC timestamp. If it's starts humongous, // also reset the GC timestamp of its corresponding @@ -1182,43 +1137,51 @@ // end fields defining the extent of the contiguous allocation region.) // But G1CollectedHeap doesn't yet support this. - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection or expansion activity. In a - // generational collector, for example, this is probably the largest - // allocation that could be supported (without expansion) in the youngest - // generation. It is "unsafe" because no locks are taken; the result - // should be treated as an approximation, not a guarantee, for use in - // heuristic resizing decisions. - virtual size_t unsafe_max_alloc(); - virtual bool is_maximal_no_gc() const { - return _g1_storage.uncommitted_size() == 0; + return _hrm.available() == 0; } - // The total number of regions in the heap. - uint n_regions() { return _hrs.length(); } + // The current number of regions in the heap. + uint num_regions() const { return _hrm.length(); } // The max number of regions in the heap. - uint max_regions() { return _hrs.max_length(); } + uint max_regions() const { return _hrm.max_length(); } // The number of regions that are completely free. - uint free_regions() { return _free_list.length(); } + uint num_free_regions() const { return _hrm.num_free_regions(); } // The number of regions that are not completely free. - uint used_regions() { return n_regions() - free_regions(); } - - // The number of regions available for "regular" expansion. - uint expansion_regions() { return _expansion_regions; } - - // Factory method for HeapRegion instances. It will return NULL if - // the allocation fails. - HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom); + uint num_used_regions() const { return num_regions() - num_free_regions(); } void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; void verify_dirty_young_regions() PRODUCT_RETURN; +#ifndef PRODUCT + // Make sure that the given bitmap has no marked objects in the + // range [from,limit). If it does, print an error message and return + // false. Otherwise, just return true. bitmap_name should be "prev" + // or "next". 
+ bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap, + HeapWord* from, HeapWord* limit); + + // Verify that the prev / next bitmap range [tams,end) for the given + // region has no marks. Return true if all is well, false if errors + // are detected. + bool verify_bitmaps(const char* caller, HeapRegion* hr); +#endif // PRODUCT + + // If G1VerifyBitmaps is set, verify that the marking bitmaps for + // the given region do not have any spurious marks. If errors are + // detected, print appropriate error messages and crash. + void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN; + + // If G1VerifyBitmaps is set, verify that the marking bitmaps do not + // have any spurious marks. If errors are detected, print + // appropriate error messages and crash. + void check_bitmaps(const char* caller) PRODUCT_RETURN; + // verify_region_sets() performs verification over the region // lists. It will be compiled in the product code to be used when // necessary (i.e., during heap verification). @@ -1237,7 +1200,7 @@ #ifdef ASSERT bool is_on_master_free_list(HeapRegion* hr) { - return hr->containing_set() == &_free_list; + return _hrm.is_free(hr); } #endif // ASSERT @@ -1249,7 +1212,7 @@ } void append_secondary_free_list() { - _free_list.add_ordered(&_secondary_free_list); + _hrm.insert_list_into_free_list(&_secondary_free_list); } void append_secondary_free_list_if_not_empty_with_lock() { @@ -1275,7 +1238,7 @@ // Determine whether the given region is one that we are using as an // old GC alloc region. bool is_old_gc_alloc_region(HeapRegion* hr) { - return hr == _retained_old_gc_alloc_region; + return _allocator->is_retained_old_region(hr); } // Perform a collection of the heap; intended for use in implementing @@ -1286,6 +1249,11 @@ // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); + virtual bool copy_allocation_context_stats(const jint* contexts, + jlong* totals, + jbyte* accuracy, + jint len); + // True iff an evacuation has failed in the most-recent collection. bool evacuation_failed() { return _evacuation_failed; } @@ -1295,33 +1263,84 @@ // Returns "TRUE" iff "p" points into the committed areas of the heap. virtual bool is_in(const void* p) const; +#ifdef ASSERT + // Returns whether p is in one of the available areas of the heap. Slow but + // extensive version. + bool is_in_exact(const void* p) const; +#endif // Return "TRUE" iff the given object address is within the collection - // set. + // set. Slow implementation. inline bool obj_in_cs(oop obj); + inline bool is_in_cset(oop obj); + + inline bool is_in_cset_or_humongous(const oop obj); + + enum in_cset_state_t { + InNeither, // neither in collection set nor humongous + InCSet, // region is in collection set only + IsHumongous // region is a humongous start region + }; + private: + // Instances of this class are used for quick tests on whether a reference points + // into the collection set or is a humongous object (points into a humongous + // object). + // Each of the array's elements denotes whether the corresponding region is in + // the collection set or a humongous region. + // We use this to quickly reclaim humongous objects: by making a humongous region + // succeed this test, we sort-of add it to the collection set. During the reference + // iteration closures, when we see a humongous region, we simply mark it as + // referenced, i.e. live. 
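The class defined next implements this fast test inside G1. As a standalone sketch of the biasing idea it relies on (hypothetical region size and heap base; the real code shifts by HeapRegion::LogOfHRGrainBytes and biases the base pointer itself so even the index subtraction disappears):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

enum InCSetState : char { InNeither = 0, InCSet = 1, IsHumongous = 2 };

// Per-region table indexed by (addr >> log_region_size). The bias is kept
// here as an explicit index offset to stay within well-defined C++.
class BiasedRegionTable {
  std::vector<char> _entries;
  uintptr_t         _bias;   // region index of the heap's first region
  int               _shift;  // log2(region size in bytes)
 public:
  BiasedRegionTable(uintptr_t heap_base, size_t num_regions, int log_region_size)
      : _entries(num_regions, InNeither),
        _bias(heap_base >> log_region_size),
        _shift(log_region_size) {}

  void set(uintptr_t addr, InCSetState s) {
    _entries[(addr >> _shift) - _bias] = s;
  }
  InCSetState at(uintptr_t addr) const {
    return static_cast<InCSetState>(_entries[(addr >> _shift) - _bias]);
  }
  bool is_in_cset_or_humongous(uintptr_t addr) const {
    return at(addr) != InNeither;
  }
};

int main() {
  const int       kLogRegion = 20;           // pretend 1 MiB regions
  const uintptr_t kHeapBase  = 0x40000000u;  // pretend heap start
  BiasedRegionTable t(kHeapBase, 16, kLogRegion);

  t.set(kHeapBase + (3u << kLogRegion), InCSet);       // region 3: in cset
  t.set(kHeapBase + (7u << kLogRegion), IsHumongous);  // region 7: humongous

  assert(t.is_in_cset_or_humongous(kHeapBase + (3u << kLogRegion) + 128));
  assert(!t.is_in_cset_or_humongous(kHeapBase + (4u << kLogRegion)));
  std::printf("region 7 state: %d\n", t.at(kHeapBase + (7u << kLogRegion)));
  return 0;
}

HotSpot instead pre-subtracts the bias from the array base, so a query is a single shift plus load.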
+ class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray { + protected: + char default_value() const { return G1CollectedHeap::InNeither; } + public: + void set_humongous(uintptr_t index) { + assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values"); + set_by_index(index, G1CollectedHeap::IsHumongous); + } + + void clear_humongous(uintptr_t index) { + set_by_index(index, G1CollectedHeap::InNeither); + } + + void set_in_cset(uintptr_t index) { + assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value"); + set_by_index(index, G1CollectedHeap::InCSet); + } + + bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; } + bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; } + G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); } + void clear() { G1BiasedMappedArray::clear(); } + }; + + // This array is used for a quick test on whether a reference points into + // the collection set or not. Each of the array's elements denotes whether the + // corresponding region is in the collection set or not. + G1FastCSetBiasedMappedArray _in_cset_fast_test; + + public: + + inline in_cset_state_t in_cset_state(const oop obj); + // Return "TRUE" iff the given object address is in the reserved // region of g1. bool is_in_g1_reserved(const void* p) const { - return _g1_reserved.contains(p); + return _hrm.reserved().contains(p); } // Returns a MemRegion that corresponds to the space that has been // reserved for the heap - MemRegion g1_reserved() { - return _g1_reserved; - } - - // Returns a MemRegion that corresponds to the space that has been - // committed in the heap - MemRegion g1_committed() { - return _g1_committed; + MemRegion g1_reserved() const { + return _hrm.reserved(); } virtual bool is_in_closed_subset(const void* p) const; - G1SATBCardTableModRefBS* g1_barrier_set() { - return (G1SATBCardTableModRefBS*) barrier_set(); + G1SATBCardTableLoggingModRefBS* g1_barrier_set() { + return (G1SATBCardTableLoggingModRefBS*) barrier_set(); } // This resets the card table to all zeros. It is used after @@ -1334,9 +1353,6 @@ // "cl.do_oop" on each. virtual void oop_iterate(ExtendedOopClosure* cl); - // Same as above, restricted to a memory region. - void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); - // Iterate over all objects, calling "cl.do_object" on each. virtual void object_iterate(ObjectClosure* cl); @@ -1354,6 +1370,12 @@ // Return the region with the given index. It assumes the index is valid. inline HeapRegion* region_at(uint index) const; + // Calculate the region index of the given address. Given address must be + // within the heap. + inline uint addr_to_region(HeapWord* addr) const; + + inline HeapWord* bottom_addr_for_region(uint index) const; + // Divide the heap region sequence into "chunks" of some size (the number // of regions divided by the number of parallel threads times some // overpartition factor, currently 4). Assumes that this will be called @@ -1367,10 +1389,10 @@ // setting the claim value of the second and subsequent regions of the // chunk.) For now requires that "doHeapRegion" always returns "false", // i.e., that a closure never attempt to abort a traversal. 
- void heap_region_par_iterate_chunked(HeapRegionClosure* blk, - uint worker, - uint no_of_par_workers, - jint claim_value); + void heap_region_par_iterate_chunked(HeapRegionClosure* cl, + uint worker_id, + uint num_workers, + jint claim_value) const; // It resets all the region claim values to the default. void reset_heap_region_claim_values(); @@ -1395,35 +1417,27 @@ // starting region for iterating over the current collection set. HeapRegion* start_cset_region_for_worker(uint worker_i); - // This is a convenience method that is used by the - // HeapRegionIterator classes to calculate the starting region for - // each worker so that they do not all start from the same region. - HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers); - // Iterate over the regions (if any) in the current collection set. void collection_set_iterate(HeapRegionClosure* blk); // As above but starting from region r void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); - // Returns the first (lowest address) compactible space in the heap. - virtual CompactibleSpace* first_compactible_space(); + HeapRegion* next_compaction_region(const HeapRegion* from) const; // A CollectedHeap will contain some number of spaces. This finds the // space containing a given address, or else returns NULL. virtual Space* space_containing(const void* addr) const; - // A G1CollectedHeap will contain some number of heap regions. This - // finds the region containing a given address, or else returns NULL. + // Returns the HeapRegion that contains addr. addr must not be NULL. + template + inline HeapRegion* heap_region_containing_raw(const T addr) const; + + // Returns the HeapRegion that contains addr. addr must not be NULL. + // If addr is within a humongous continues region, it returns its humongous start region. template inline HeapRegion* heap_region_containing(const T addr) const; - // Like the above, but requires "addr" to be in the heap (to avoid a - // null-check), and unlike the above, may return an continuing humongous - // region. - template - inline HeapRegion* heap_region_containing_raw(const T addr) const; - // A CollectedHeap is divided into a dense sequence of "blocks"; that is, // each address in the (reserved) heap is a member of exactly // one block. The defining characteristic of a block is that it is @@ -1565,7 +1579,6 @@ // the region to which the object belongs. An object is dead // iff a) it was not allocated since the last mark and b) it // is not marked. - bool is_obj_dead(const oop obj, const HeapRegion* hr) const { return !hr->obj_allocated_since_prev_marking(obj) && @@ -1575,7 +1588,6 @@ // This function returns true when an object has been // around since the previous marking and hasn't yet // been marked during this marking. - bool is_obj_ill(const oop obj, const HeapRegion* hr) const { return !hr->obj_allocated_since_next_marking(obj) && @@ -1621,19 +1633,9 @@ // Unregister the given nmethod from the G1 heap virtual void unregister_nmethod(nmethod* nm); - // Migrate the nmethods in the code root lists of the regions - // in the collection set to regions in to-space. In the event - // of an evacuation failure, nmethods that reference objects - // that were not successfullly evacuated are not migrated. - void migrate_strong_code_roots(); - // Free up superfluous code root memory. void purge_code_root_memory(); - // During an initial mark pause, mark all the code roots that - // point into regions *not* in the collection set. 
- void mark_strong_code_roots(uint worker_id); - // Rebuild the strong code root lists for each region // after a full GC void rebuild_strong_code_roots(); @@ -1642,6 +1644,9 @@ // in symbol table, possibly in parallel. void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true); + // Parallel phase of unloading/cleaning after G1 concurrent mark. + void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred); + // Redirty logged cards in the refinement queue. void redirty_logged_cards(); // Verification @@ -1713,274 +1718,4 @@ size_t _max_heap_capacity; }; -class G1ParGCAllocBuffer: public ParGCAllocBuffer { -private: - bool _retired; - -public: - G1ParGCAllocBuffer(size_t gclab_word_size); - - void set_buf(HeapWord* buf) { - ParGCAllocBuffer::set_buf(buf); - _retired = false; - } - - void retire(bool end_of_gc, bool retain) { - if (_retired) - return; - ParGCAllocBuffer::retire(end_of_gc, retain); - _retired = true; - } -}; - -class G1ParScanThreadState : public StackObj { -protected: - G1CollectedHeap* _g1h; - RefToScanQueue* _refs; - DirtyCardQueue _dcq; - G1SATBCardTableModRefBS* _ct_bs; - G1RemSet* _g1_rem; - - G1ParGCAllocBuffer _surviving_alloc_buffer; - G1ParGCAllocBuffer _tenured_alloc_buffer; - G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; - ageTable _age_table; - - G1ParScanClosure _scanner; - - size_t _alloc_buffer_waste; - size_t _undo_waste; - - OopsInHeapRegionClosure* _evac_failure_cl; - - int _hash_seed; - uint _queue_num; - - size_t _term_attempts; - - double _start; - double _start_strong_roots; - double _strong_roots_time; - double _start_term; - double _term_time; - - // Map from young-age-index (0 == not young, 1 is youngest) to - // surviving words. base is what we get back from the malloc call - size_t* _surviving_young_words_base; - // this points into the array, as we use the first few entries for padding - size_t* _surviving_young_words; - -#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t)) - - void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } - - void add_to_undo_waste(size_t waste) { _undo_waste += waste; } - - DirtyCardQueue& dirty_card_queue() { return _dcq; } - G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } - - template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid); - - template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { - // If the new value of the field points to the same region or - // is the to-space, we don't need to include it in the Rset updates. - if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { - size_t card_index = ctbs()->index_for(p); - // If the card hasn't been added to the buffer, do it.
- if (ctbs()->mark_card_deferred(card_index)) { - dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); - } - } - } - -public: - G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp); - - ~G1ParScanThreadState() { - FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); - } - - RefToScanQueue* refs() { return _refs; } - ageTable* age_table() { return &_age_table; } - - G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { - return _alloc_buffers[purpose]; - } - - size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } - size_t undo_waste() const { return _undo_waste; } - -#ifdef ASSERT - bool verify_ref(narrowOop* ref) const; - bool verify_ref(oop* ref) const; - bool verify_task(StarTask ref) const; -#endif // ASSERT - - template void push_on_queue(T* ref) { - assert(verify_ref(ref), "sanity"); - refs()->push(ref); - } - - template inline void update_rs(HeapRegion* from, T* p, int tid); - - HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { - HeapWord* obj = NULL; - size_t gclab_word_size = _g1h->desired_plab_sz(purpose); - if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { - G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); - add_to_alloc_buffer_waste(alloc_buf->words_remaining()); - alloc_buf->retire(false /* end_of_gc */, false /* retain */); - - HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); - if (buf == NULL) return NULL; // Let caller handle allocation failure. - // Otherwise. - alloc_buf->set_word_size(gclab_word_size); - alloc_buf->set_buf(buf); - - obj = alloc_buf->allocate(word_sz); - assert(obj != NULL, "buffer was definitely big enough..."); - } else { - obj = _g1h->par_allocate_during_gc(purpose, word_sz); - } - return obj; - } - - HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { - HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); - if (obj != NULL) return obj; - return allocate_slow(purpose, word_sz); - } - - void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { - if (alloc_buffer(purpose)->contains(obj)) { - assert(alloc_buffer(purpose)->contains(obj + word_sz - 1), - "should contain whole object"); - alloc_buffer(purpose)->undo_allocation(obj, word_sz); - } else { - CollectedHeap::fill_with_object(obj, word_sz); - add_to_undo_waste(word_sz); - } - } - - void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { - _evac_failure_cl = evac_failure_cl; - } - OopsInHeapRegionClosure* evac_failure_closure() { - return _evac_failure_cl; - } - - int* hash_seed() { return &_hash_seed; } - uint queue_num() { return _queue_num; } - - size_t term_attempts() const { return _term_attempts; } - void note_term_attempt() { _term_attempts++; } - - void start_strong_roots() { - _start_strong_roots = os::elapsedTime(); - } - void end_strong_roots() { - _strong_roots_time += (os::elapsedTime() - _start_strong_roots); - } - double strong_roots_time() const { return _strong_roots_time; } - - void start_term_time() { - note_term_attempt(); - _start_term = os::elapsedTime(); - } - void end_term_time() { - _term_time += (os::elapsedTime() - _start_term); - } - double term_time() const { return _term_time; } - - double elapsed_time() const { - return os::elapsedTime() - _start; - } - - static void - print_termination_stats_hdr(outputStream* const st = gclog_or_tty); - void - print_termination_stats(int i, outputStream* const st = gclog_or_tty) const; - - size_t* surviving_young_words() { - // We add on to hide entry 
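For scale, the test word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct in the removed allocate_slow() above chooses between refilling the PLAB and allocating directly: with the default ParallelGCBufferWastePct of 10 and, say, a 4096-word PLAB, a 409-word copy still goes through a fresh buffer (409 * 100 = 40900 < 40960), while a 410-word copy is allocated straight out of the region. Since the fast path only fails when fewer than word_sz words remain in the buffer, this bounds the space thrown away by an early retire to under 10% of a PLAB.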
0 which accumulates surviving words for - // age -1 regions (i.e. non-young ones) - return _surviving_young_words; - } - - void retire_alloc_buffers() { - for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { - size_t waste = _alloc_buffers[ap]->words_remaining(); - add_to_alloc_buffer_waste(waste); - _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), - true /* end_of_gc */, - false /* retain */); - } - } -private: - #define G1_PARTIAL_ARRAY_MASK 0x2 - - inline bool has_partial_array_mask(oop* ref) const { - return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK; - } - - // We never encode partial array oops as narrowOop*, so return false immediately. - // This allows the compiler to create optimized code when popping references from - // the work queue. - inline bool has_partial_array_mask(narrowOop* ref) const { - assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*"); - return false; - } - - // Only implement set_partial_array_mask() for regular oops, not for narrowOops. - // We always encode partial arrays as regular oop, to allow the - // specialization for has_partial_array_mask() for narrowOops above. - // This means that unintentional use of this method with narrowOops are caught - // by the compiler. - inline oop* set_partial_array_mask(oop obj) const { - assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); - return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK); - } - - inline oop clear_partial_array_mask(oop* ref) const { - return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); - } - - inline void do_oop_partial_array(oop* p); - - // This method is applied to the fields of the objects that have just been copied. - template void do_oop_evac(T* p, HeapRegion* from) { - assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)), - "Reference should not be NULL here as such are never pushed to the task queue."); - oop obj = oopDesc::load_decode_heap_oop_not_null(p); - - // Although we never intentionally push references outside of the collection - // set, due to (benign) races in the claim mechanism during RSet scanning more - // than one thread might claim the same card. So the same card may be - // processed multiple times. So redo this check. 
- if (_g1h->in_cset_fast_test(obj)) { - oop forwardee; - if (obj->is_forwarded()) { - forwardee = obj->forwardee(); - } else { - forwardee = copy_to_survivor_space(obj); - } - assert(forwardee != NULL, "forwardee should not be NULL"); - oopDesc::encode_store_heap_oop(p, forwardee); - } - - assert(obj != NULL, "Must be"); - update_rs(from, p, queue_num()); - } -public: - - oop copy_to_survivor_space(oop const obj); - - template inline void deal_with_reference(T* ref_to_scan); - - inline void deal_with_reference(StarTask ref); - -public: - void trim_queue(); -}; - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,34 +29,61 @@ #include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" -#include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/g1/heapRegionSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "utilities/taskqueue.hpp" // Inline functions for G1CollectedHeap +inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() { + return _allocation_context_stats; +} + // Return the region with the given index. It assumes the index is valid. -inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); } +inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); } + +inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const { + assert(is_in_reserved(addr), + err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")", + p2i(addr), p2i(_reserved.start()), p2i(_reserved.end()))); + return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes); +} + +inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const { + return _hrm.reserved().start() + index * HeapRegion::GrainWords; +} template -inline HeapRegion* -G1CollectedHeap::heap_region_containing(const T addr) const { - HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr); - // hr can be null if addr in perm_gen - if (hr != NULL && hr->continuesHumongous()) { - hr = hr->humongous_start_region(); +inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const { + assert(addr != NULL, "invariant"); + assert(is_in_g1_reserved((const void*) addr), + err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")", + p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()))); + return _hrm.addr_to_region((HeapWord*) addr); +} + +template +inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const { + HeapRegion* hr = heap_region_containing_raw(addr); + if (hr->continuesHumongous()) { + return hr->humongous_start_region(); } return hr; } -template -inline HeapRegion* -G1CollectedHeap::heap_region_containing_raw(const T addr) const { - assert(_g1_reserved.contains((const void*) addr), "invariant"); - HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr); - return res; +inline void 
G1CollectedHeap::reset_gc_time_stamp() { + _gc_time_stamp = 0; + OrderAccess::fence(); + // Clear the cached CSet starting regions and time stamps. + // Their validity is dependent on the GC timestamp. + clear_cset_start_regions(); +} + +inline void G1CollectedHeap::increment_gc_time_stamp() { + ++_gc_time_stamp; + OrderAccess::fence(); } inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) { @@ -64,22 +91,23 @@ } inline bool G1CollectedHeap::obj_in_cs(oop obj) { - HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj); + HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj); return r != NULL && r->in_collection_set(); } -inline HeapWord* -G1CollectedHeap::attempt_allocation(size_t word_size, - unsigned int* gc_count_before_ret, - int* gclocker_retry_count_ret) { +inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret, + int* gclocker_retry_count_ret) { assert_heap_not_locked_and_not_at_safepoint(); assert(!isHumongous(word_size), "attempt_allocation() should not " "be called for humongous allocation requests"); - HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, - false /* bot_updates */); + AllocationContext_t context = AllocationContext::current(); + HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size, + false /* bot_updates */); if (result == NULL) { result = attempt_allocation_slow(word_size, + context, gc_count_before_ret, gclocker_retry_count_ret); } @@ -90,17 +118,17 @@ return result; } -inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t - word_size) { +inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size, + AllocationContext_t context) { assert(!isHumongous(word_size), "we should not be seeing humongous-size allocations in this path"); - HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size, - false /* bot_updates */); + HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size, + false /* bot_updates */); if (result == NULL) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size, - false /* bot_updates */); + result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size, + false /* bot_updates */); } if (result != NULL) { dirty_young_block(result, word_size); @@ -108,16 +136,17 @@ return result; } -inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) { +inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size, + AllocationContext_t context) { assert(!isHumongous(word_size), "we should not be seeing humongous-size allocations in this path"); - HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size, - true /* bot_updates */); + HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size, + true /* bot_updates */); if (result == NULL) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - result = _old_gc_alloc_region.attempt_allocation_locked(word_size, - true /* bot_updates */); + result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size, + true /* bot_updates */); } return result; } @@ -134,8 +163,7 @@ // have to keep calling heap_region_containing_raw() in the // asserts below. 
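The attempt_allocation() paths above follow the usual two-level shape: a lock-free bump-the-pointer attempt in the current allocation region, with a locked slow path taken only when the region is exhausted. A simplified sketch of the fast path, assuming a single contiguous region (HotSpot layers per-context regions, BOT updates and GC retries on top of this):

    #include <atomic>
    #include <cstddef>

    class AllocRegion {
      char* const _end;                // exclusive limit of the region
      std::atomic<char*> _top;         // current allocation cursor
    public:
      AllocRegion(char* bottom, char* end) : _end(end), _top(bottom) {}

      // Returns NULL when the region cannot satisfy the request; the caller
      // then takes the slow path (lock, retire this region, get a new one).
      char* attempt_allocation(size_t bytes) {
        char* old_top = _top.load(std::memory_order_relaxed);
        do {
          if (bytes > static_cast<size_t>(_end - old_top)) {
            return nullptr;            // out of space: fall back to slow path
          }
        } while (!_top.compare_exchange_weak(old_top, old_top + bytes,
                                             std::memory_order_relaxed));
        return old_top;                // we now own [old_top, old_top + bytes)
      }
    };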
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);) - assert(containing_hr != NULL && start != NULL && word_size > 0, - "pre-condition"); + assert(word_size > 0, "pre-condition"); assert(containing_hr->is_in(start), "it should contain start"); assert(containing_hr->is_young(), "it should be young"); assert(!containing_hr->isHumongous(), "it should not be humongous"); @@ -159,17 +187,11 @@ return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); } - // This is a fast test on whether a reference points into the // collection set or not. Assume that the reference // points into the heap. -inline bool G1CollectedHeap::in_cset_fast_test(oop obj) { - assert(_in_cset_fast_test != NULL, "sanity"); - assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*)obj))); - // no need to subtract the bottom of the heap from obj, - // _in_cset_fast_test is biased - uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes; - bool ret = _in_cset_fast_test[index]; +inline bool G1CollectedHeap::is_in_cset(oop obj) { + bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj); // let's make sure the result is consistent with what the slower // test returns assert( ret || !obj_in_cs(obj), "sanity"); @@ -177,6 +199,18 @@ return ret; } +bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) { + return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj); +} + +G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) { + return _in_cset_fast_test.at((HeapWord*)obj); +} + +void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) { + _in_cset_fast_test.set_humongous(index); +} + #ifndef PRODUCT // Support for G1EvacuationFailureALot @@ -226,8 +260,7 @@ } } -inline bool -G1CollectedHeap::evacuation_should_fail() { +inline bool G1CollectedHeap::evacuation_should_fail() { if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) { return false; } @@ -251,8 +284,10 @@ #endif // #ifndef PRODUCT inline bool G1CollectedHeap::is_in_young(const oop obj) { - HeapRegion* hr = heap_region_containing(obj); - return hr != NULL && hr->is_young(); + if (obj == NULL) { + return false; + } + return heap_region_containing(obj)->is_young(); } // We don't need barriers for initializing stores to objects @@ -265,105 +300,34 @@ } inline bool G1CollectedHeap::is_obj_dead(const oop obj) const { - const HeapRegion* hr = heap_region_containing(obj); - if (hr == NULL) { - if (obj == NULL) return false; - else return true; + if (obj == NULL) { + return false; } - else return is_obj_dead(obj, hr); + return is_obj_dead(obj, heap_region_containing(obj)); } inline bool G1CollectedHeap::is_obj_ill(const oop obj) const { - const HeapRegion* hr = heap_region_containing(obj); - if (hr == NULL) { - if (obj == NULL) return false; - else return true; + if (obj == NULL) { + return false; } - else return is_obj_ill(obj, hr); + return is_obj_ill(obj, heap_region_containing(obj)); } -template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) { - if (!from->is_survivor()) { - _g1_rem->par_write_ref(from, p, tid); - } -} - -template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) { - if (G1DeferredRSUpdate) { - deferred_rs_update(from, p, tid); - } else { - immediate_rs_update(from, p, tid); - } -} - - -inline void G1ParScanThreadState::do_oop_partial_array(oop* p) { - assert(has_partial_array_mask(p), "invariant"); - oop from_obj = 
clear_partial_array_mask(p); - - assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap."); - assert(from_obj->is_objArray(), "must be obj array"); - objArrayOop from_obj_array = objArrayOop(from_obj); - // The from-space object contains the real length. - int length = from_obj_array->length(); - - assert(from_obj->is_forwarded(), "must be forwarded"); - oop to_obj = from_obj->forwardee(); - assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); - objArrayOop to_obj_array = objArrayOop(to_obj); - // We keep track of the next start index in the length field of the - // to-space object. - int next_index = to_obj_array->length(); - assert(0 <= next_index && next_index < length, - err_msg("invariant, next index: %d, length: %d", next_index, length)); - - int start = next_index; - int end = length; - int remainder = end - start; - // We'll try not to push a range that's smaller than ParGCArrayScanChunk. - if (remainder > 2 * ParGCArrayScanChunk) { - end = start + ParGCArrayScanChunk; - to_obj_array->set_length(end); - // Push the remainder before we process the range in case another - // worker has run out of things to do and can steal it. - oop* from_obj_p = set_partial_array_mask(from_obj); - push_on_queue(from_obj_p); - } else { - assert(length == end, "sanity"); - // We'll process the final range for this object. Restore the length - // so that the heap remains parsable in case of evacuation failure. - to_obj_array->set_length(end); - } - _scanner.set_region(_g1h->heap_region_containing_raw(to_obj)); - // Process indexes [start,end). It will also process the header - // along with the first chunk (i.e., the chunk with start == 0). - // Note that at this point the length field of to_obj_array is not - // correct given that we are using it to keep track of the next - // start index. oop_iterate_range() (thankfully!) ignores the length - // field and only relies on the start / end parameters. It does - // however return the size of the object which will be incorrect. So - // we have to ignore it even if we wanted to use it. - to_obj_array->oop_iterate_range(&_scanner, start, end); -} -template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { - if (!has_partial_array_mask(ref_to_scan)) { - // Note: we can use "raw" versions of "region_containing" because - // "obj_to_scan" is definitely in the heap, and is not in a - // humongous region. - HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); - do_oop_evac(ref_to_scan, r); - } else { - do_oop_partial_array((oop*)ref_to_scan); - } -} - -inline void G1ParScanThreadState::deal_with_reference(StarTask ref) { - assert(verify_task(ref), "sanity"); - if (ref.is_narrow()) { - deal_with_reference((narrowOop*)ref); - } else { - deal_with_reference((oop*)ref); +inline void G1CollectedHeap::set_humongous_is_live(oop obj) { + uint region = addr_to_region((HeapWord*)obj); + // We not only set the "live" flag in the humongous_is_live table, but also + // reset the entry in the _in_cset_fast_test table so that subsequent references + // to the same humongous object do not go into the slow path again. + // This is racy, as multiple threads may enter here at the same time, but this + // is benign. + // During collection we only ever set the "live" flag, and only ever clear the + // entry in the _in_cset_fast_test table. + // We only ever evaluate the contents of these tables (in the VM thread) after + // having synchronized the worker threads with the VM thread, or in the same + // thread (i.e. 
within the VM thread). + if (!_humongous_is_live.is_live(region)) { + _humongous_is_live.set_live(region); + _in_cset_fast_test.clear_humongous(region); } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.hpp" + +bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, + jlong* totals, + jbyte* accuracy, + jint len) { + return false; +} --- ./hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -455,7 +455,7 @@ } else { _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); } - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); update_young_list_target_length(); // We may immediately start allocating regions and placing them on the @@ -828,7 +828,7 @@ record_survivor_regions(0, NULL, NULL); - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); // Reset survivors SurvRateGroup. _survivor_surv_rate_group->reset(); update_young_list_target_length(); @@ -1046,7 +1046,7 @@ bool new_in_marking_window = _in_marking_window; bool new_in_marking_window_im = false; - if (during_initial_mark_pause()) { + if (last_pause_included_initial_mark) { new_in_marking_window = true; new_in_marking_window_im = true; } @@ -1180,7 +1180,7 @@ _in_marking_window = new_in_marking_window; _in_marking_window_im = new_in_marking_window_im; - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); update_young_list_target_length(); // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 
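The comment in set_humongous_is_live() at the top of this hunk describes a deliberately benign race: any number of workers may set the same flag, all of them write the same value, and the tables are only evaluated after the workers have been synchronized with the VM thread. A sketch of that pattern with C++ atomics (a simplified stand-in, not the HotSpot type):

    #include <atomic>
    #include <cstddef>
    #include <vector>

    // Per-region liveness flags. Multiple GC workers may discover the same
    // humongous object concurrently; they all store the same value, and the
    // flags are only read after worker/VM-thread synchronization, so a
    // relaxed check-then-set is sufficient.
    class HumongousIsLive {
      std::vector<std::atomic<bool>> _live;
    public:
      explicit HumongousIsLive(size_t num_regions) : _live(num_regions) {
        for (auto& flag : _live) flag.store(false, std::memory_order_relaxed);
      }
      void set_live(size_t region) {
        // The unsynchronized read is only an optimization to avoid redundant
        // stores; losing the race just rewrites the same value.
        if (!_live[region].load(std::memory_order_relaxed)) {
          _live[region].store(true, std::memory_order_relaxed);
        }
      }
      bool is_live(size_t region) const {
        return _live[region].load(std::memory_order_relaxed);
      }
    };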
@@ -1202,7 +1202,7 @@ _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); _heap_capacity_bytes_before_gc = _g1->capacity(); _heap_used_bytes_before_gc = _g1->used(); - _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + _cur_collection_pause_used_regions_at_start = _g1->num_used_regions(); _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; @@ -1425,6 +1425,18 @@ #endif // PRODUCT } +bool G1CollectorPolicy::is_young_list_full() { + uint young_list_length = _g1->young_list()->length(); + uint young_list_target_length = _young_list_target_length; + return young_list_length >= young_list_target_length; +} + +bool G1CollectorPolicy::can_expand_young_list() { + uint young_list_length = _g1->young_list()->length(); + uint young_list_max_length = _young_list_max_length; + return young_list_length < young_list_max_length; +} + uint G1CollectorPolicy::max_regions(int purpose) { switch (purpose) { case GCAllocForSurvived: @@ -1617,7 +1629,7 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { _collectionSetChooser->clear(); - uint region_num = _g1->n_regions(); + uint region_num = _g1->num_regions(); if (G1CollectedHeap::use_parallel_gc_threads()) { const uint OverpartitionFactor = 4; uint WorkUnit; @@ -1638,7 +1650,7 @@ MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), MinWorkUnit); } - _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(), + _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(), WorkUnit); ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, (int) WorkUnit); @@ -1664,7 +1676,7 @@ // Add the heap region at the head of the non-incremental collection set void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { assert(_inc_cset_build_state == Active, "Precondition"); - assert(!hr->is_young(), "non-incremental add of young region"); + assert(hr->is_old(), "the region should be old"); assert(!hr->in_collection_set(), "should not already be in the CSet"); hr->set_in_collection_set(true); @@ -1810,7 +1822,7 @@ // Add the region at the RHS of the incremental cset void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { // We should only ever be appending survivors at the end of a pause - assert( hr->is_survivor(), "Logic"); + assert(hr->is_survivor(), "Logic"); // Do the 'common' stuff add_region_to_incremental_cset_common(hr); @@ -1828,7 +1840,7 @@ // Add the region to the LHS of the incremental cset void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { // Survivors should be added to the RHS at the end of a pause - assert(!hr->is_survivor(), "Logic"); + assert(hr->is_eden(), "Logic"); // Do the 'common' stuff add_region_to_incremental_cset_common(hr); @@ -1935,7 +1947,7 @@ // of them are available. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - const size_t region_num = g1h->n_regions(); + const size_t region_num = g1h->num_regions(); const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; size_t result = region_num * perc / 100; // emulate ceiling @@ -1988,7 +2000,11 @@ HeapRegion* hr = young_list->first_survivor_region(); while (hr != NULL) { assert(hr->is_survivor(), "badly formed young list"); - hr->set_young(); + // There is a convention that all the young regions in the CSet + // are tagged as "eden", so we do this for the survivors here. 
We + // use the special set_eden_pre_gc() as it doesn't check that the + // region is free (which is not the case here). + hr->set_eden_pre_gc(); hr = hr->get_next_young_region(); } --- ./hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP #include "gc_implementation/g1/collectionSetChooser.hpp" +#include "gc_implementation/g1/g1Allocator.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" #include "memory/collectorPolicy.hpp" @@ -299,13 +300,13 @@ // Accessors void set_region_eden(HeapRegion* hr, int young_index_in_cset) { - hr->set_young(); + hr->set_eden(); hr->install_surv_rate_group(_short_lived_surv_rate_group); hr->set_young_index_in_cset(young_index_in_cset); } void set_region_survivor(HeapRegion* hr, int young_index_in_cset) { - assert(hr->is_young() && hr->is_survivor(), "pre-condition"); + assert(hr->is_survivor(), "pre-condition"); hr->install_surv_rate_group(_survivor_surv_rate_group); hr->set_young_index_in_cset(young_index_in_cset); } @@ -803,7 +804,7 @@ // If an expansion would be appropriate, because recent GC overhead had // exceeded the desired limit, return an amount to expand by. - size_t expansion_amount(); + virtual size_t expansion_amount(); // Print tracing information. void print_tracing_info() const; @@ -822,17 +823,9 @@ size_t young_list_target_length() const { return _young_list_target_length; } - bool is_young_list_full() { - uint young_list_length = _g1->young_list()->length(); - uint young_list_target_length = _young_list_target_length; - return young_list_length >= young_list_target_length; - } + bool is_young_list_full(); - bool can_expand_young_list() { - uint young_list_length = _g1->young_list()->length(); - uint young_list_max_length = _young_list_max_length; - return young_list_length < young_list_max_length; - } + bool can_expand_young_list(); uint young_list_max_length() { return _young_list_max_length; --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP + +#include "gc_implementation/g1/g1CollectorPolicy.hpp" + +class G1CollectorPolicyExt : public G1CollectorPolicy { }; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -71,6 +71,9 @@ bool _during_initial_mark; bool _during_conc_mark; uint _worker_id; + HeapWord* _end_of_last_gap; + HeapWord* _last_gap_threshold; + HeapWord* _last_obj_threshold; public: RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm, @@ -83,7 +86,10 @@ _update_rset_cl(update_rset_cl), _during_initial_mark(during_initial_mark), _during_conc_mark(during_conc_mark), - _worker_id(worker_id) { } + _worker_id(worker_id), + _end_of_last_gap(hr->bottom()), + _last_gap_threshold(hr->bottom()), + _last_obj_threshold(hr->bottom()) { } size_t marked_bytes() { return _marked_bytes; } @@ -107,7 +113,12 @@ HeapWord* obj_addr = (HeapWord*) obj; assert(_hr->is_in(obj_addr), "sanity"); size_t obj_size = obj->size(); - _hr->update_bot_for_object(obj_addr, obj_size); + HeapWord* obj_end = obj_addr + obj_size; + + if (_end_of_last_gap != obj_addr) { + // there was a gap before obj_addr + _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr); + } if (obj->is_forwarded() && obj->forwardee() == obj) { // The object failed to move. @@ -115,7 +126,9 @@ // We consider all objects that we find self-forwarded to be // live. What we'll do is that we'll update the prev marking // info so that they are all under PTAMS and explicitly marked. - _cm->markPrev(obj); + if (!_cm->isPrevMarked(obj)) { + _cm->markPrev(obj); + } if (_during_initial_mark) { // For the next marking info we'll only mark the // self-forwarded objects explicitly if we are during @@ -145,28 +158,35 @@ // remembered set entries missing given that we skipped cards on // the collection set. So, we'll recreate such entries now. obj->oop_iterate(_update_rset_cl); - assert(_cm->isPrevMarked(obj), "Should be marked!"); } else { + // The object has been either evacuated or is dead. Fill it with a // dummy object. 
- MemRegion mr((HeapWord*) obj, obj_size); + MemRegion mr(obj_addr, obj_size); CollectedHeap::fill_with_object(mr); + + // must nuke all dead objects which we skipped when iterating over the region + _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end)); } + _end_of_last_gap = obj_end; + _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end); } }; class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure { G1CollectedHeap* _g1h; ConcurrentMark* _cm; - OopsInHeapRegionClosure *_update_rset_cl; uint _worker_id; + DirtyCardQueue _dcq; + UpdateRSetDeferred _update_rset_cl; + public: RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h, - OopsInHeapRegionClosure* update_rset_cl, uint worker_id) : - _g1h(g1h), _update_rset_cl(update_rset_cl), - _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { } + _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq), + _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { + } bool doHeapRegion(HeapRegion *hr) { bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause(); @@ -177,20 +197,14 @@ if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) { if (hr->evacuation_failed()) { - RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl, + RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl, during_initial_mark, during_conc_mark, _worker_id); - MemRegion mr(hr->bottom(), hr->end()); - // We'll recreate the prev marking info so we'll first clear - // the prev bitmap range for this region. We never mark any - // CSet objects explicitly so the next bitmap range should be - // cleared anyway. - _cm->clearRangePrevBitmap(mr); - hr->note_self_forwarding_removal_start(during_initial_mark, during_conc_mark); + _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr); // In the common case (i.e. when there is no evacuation // failure) we make sure that the following is done when @@ -202,9 +216,11 @@ // whenever this might be required in the future. 
hr->rem_set()->reset_for_par_iteration(); hr->reset_bot(); - _update_rset_cl->set_region(hr); + _update_rset_cl.set_region(hr); hr->object_iterate(&rspc); + hr->rem_set()->clean_strong_code_roots(hr); + hr->note_self_forwarding_removal_end(during_initial_mark, during_conc_mark, rspc.marked_bytes()); @@ -224,16 +240,7 @@ _g1h(g1h) { } void work(uint worker_id) { - UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); - DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); - UpdateRSetDeferred deferred_update(_g1h, &dcq); - - OopsInHeapRegionClosure *update_rset_cl = &deferred_update; - if (!G1DeferredRSUpdate) { - update_rset_cl = &immediate_update; - } - - RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id); + RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id); HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id); _g1h->collection_set_iterate_from(hr, &rsfp_cl); --- ./hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -166,13 +166,14 @@ _last_update_rs_processed_buffers(_max_gc_threads, "%d"), _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"), _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"), - _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"), _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"), _last_termination_times_ms(_max_gc_threads, "%.1lf"), _last_termination_attempts(_max_gc_threads, SIZE_FORMAT), _last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false), _last_gc_worker_times_ms(_max_gc_threads, "%.1lf"), _last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"), + _last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"), + _last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT), _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"), _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf") { @@ -191,13 +192,16 @@ _last_update_rs_processed_buffers.reset(); _last_scan_rs_times_ms.reset(); _last_strong_code_root_scan_times_ms.reset(); - _last_strong_code_root_mark_times_ms.reset(); _last_obj_copy_times_ms.reset(); _last_termination_times_ms.reset(); _last_termination_attempts.reset(); _last_gc_worker_end_times_ms.reset(); _last_gc_worker_times_ms.reset(); _last_gc_worker_other_times_ms.reset(); + + _last_redirty_logged_cards_time_ms.reset(); + _last_redirty_logged_cards_processed_cards.reset(); + } void G1GCPhaseTimes::note_gc_end() { @@ -208,7 +212,6 @@ _last_update_rs_processed_buffers.verify(); _last_scan_rs_times_ms.verify(); _last_strong_code_root_scan_times_ms.verify(); - _last_strong_code_root_mark_times_ms.verify(); _last_obj_copy_times_ms.verify(); _last_termination_times_ms.verify(); _last_termination_attempts.verify(); @@ -223,7 +226,6 @@ _last_update_rs_times_ms.get(i) + _last_scan_rs_times_ms.get(i) + _last_strong_code_root_scan_times_ms.get(i) + - _last_strong_code_root_mark_times_ms.get(i) + _last_obj_copy_times_ms.get(i) + _last_termination_times_ms.get(i); @@ -233,6 +235,9 @@ _last_gc_worker_times_ms.verify(); _last_gc_worker_other_times_ms.verify(); + + _last_redirty_logged_cards_time_ms.verify(); + _last_redirty_logged_cards_processed_cards.verify(); } void G1GCPhaseTimes::note_string_dedup_fixup_start() { @@ -249,6 +254,10 @@ LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value); } +void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) { + 
LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value); +} + void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) { LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: " UINT32_FORMAT "]", str, value, workers); } @@ -263,9 +272,6 @@ // Now subtract the time taken to fix up roots in generated code misc_time_ms += _cur_collection_code_root_fixup_time_ms; - // Strong code root migration time - misc_time_ms += _cur_strong_code_root_migration_time_ms; - // Strong code root purge time misc_time_ms += _cur_strong_code_root_purge_time_ms; @@ -292,9 +298,6 @@ if (_last_satb_filtering_times_ms.sum() > 0.0) { _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)"); } - if (_last_strong_code_root_mark_times_ms.sum() > 0.0) { - _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)"); - } _last_update_rs_times_ms.print(2, "Update RS (ms)"); _last_update_rs_processed_buffers.print(3, "Processed Buffers"); _last_scan_rs_times_ms.print(2, "Scan RS (ms)"); @@ -312,9 +315,6 @@ if (_last_satb_filtering_times_ms.sum() > 0.0) { _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)"); } - if (_last_strong_code_root_mark_times_ms.sum() > 0.0) { - _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)"); - } _last_update_rs_times_ms.print(1, "Update RS (ms)"); _last_update_rs_processed_buffers.print(2, "Processed Buffers"); _last_scan_rs_times_ms.print(1, "Scan RS (ms)"); @@ -322,7 +322,6 @@ _last_obj_copy_times_ms.print(1, "Object Copy (ms)"); } print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms); - print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms); print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms); if (G1StringDedup::is_enabled()) { print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads); @@ -350,8 +349,18 @@ _recorded_non_young_cset_choice_time_ms)); print_stats(2, "Ref Proc", _cur_ref_proc_time_ms); print_stats(2, "Ref Enq", _cur_ref_enq_time_ms); - if (G1DeferredRSUpdate) { - print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms); + print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms); + if (G1Log::finest()) { + _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty"); + _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards"); + } + if (G1ReclaimDeadHumongousObjectsAtYoungGC) { + print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms); + if (G1Log::finest()) { + print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total); + print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates); + print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed); + } } print_stats(2, "Free CSet", (_recorded_young_free_cset_time_ms + --- ./hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -120,7 +120,6 @@ WorkerDataArray _last_update_rs_processed_buffers; WorkerDataArray _last_scan_rs_times_ms; WorkerDataArray _last_strong_code_root_scan_times_ms; - WorkerDataArray _last_strong_code_root_mark_times_ms; WorkerDataArray _last_obj_copy_times_ms; WorkerDataArray _last_termination_times_ms; WorkerDataArray _last_termination_attempts; @@ -130,7 +129,6 @@ double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; - double 
_cur_strong_code_root_migration_time_ms; double _cur_strong_code_root_purge_time_ms; double _cur_evac_fail_recalc_used; @@ -151,16 +149,24 @@ double _recorded_young_cset_choice_time_ms; double _recorded_non_young_cset_choice_time_ms; + WorkerDataArray _last_redirty_logged_cards_time_ms; + WorkerDataArray _last_redirty_logged_cards_processed_cards; double _recorded_redirty_logged_cards_time_ms; double _recorded_young_free_cset_time_ms; double _recorded_non_young_free_cset_time_ms; + double _cur_fast_reclaim_humongous_time_ms; + size_t _cur_fast_reclaim_humongous_total; + size_t _cur_fast_reclaim_humongous_candidates; + size_t _cur_fast_reclaim_humongous_reclaimed; + double _cur_verify_before_time_ms; double _cur_verify_after_time_ms; // Helper methods for detailed logging void print_stats(int level, const char* str, double value); + void print_stats(int level, const char* str, size_t value); void print_stats(int level, const char* str, double value, uint workers); public: @@ -197,10 +203,6 @@ _last_strong_code_root_scan_times_ms.set(worker_i, ms); } - void record_strong_code_root_mark_time(uint worker_i, double ms) { - _last_strong_code_root_mark_times_ms.set(worker_i, ms); - } - void record_obj_copy_time(uint worker_i, double ms) { _last_obj_copy_times_ms.set(worker_i, ms); } @@ -230,10 +232,6 @@ _cur_collection_code_root_fixup_time_ms = ms; } - void record_strong_code_root_migration_time(double ms) { - _cur_strong_code_root_migration_time_ms = ms; - } - void record_strong_code_root_purge_time(double ms) { _cur_strong_code_root_purge_time_ms = ms; } @@ -285,6 +283,16 @@ _recorded_non_young_free_cset_time_ms = time_ms; } + void record_fast_reclaim_humongous_stats(size_t total, size_t candidates) { + _cur_fast_reclaim_humongous_total = total; + _cur_fast_reclaim_humongous_candidates = candidates; + } + + void record_fast_reclaim_humongous_time_ms(double value, size_t reclaimed) { + _cur_fast_reclaim_humongous_time_ms = value; + _cur_fast_reclaim_humongous_reclaimed = reclaimed; + } + void record_young_cset_choice_time_ms(double time_ms) { _recorded_young_cset_choice_time_ms = time_ms; } @@ -293,6 +301,14 @@ _recorded_non_young_cset_choice_time_ms = time_ms; } + void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) { + _last_redirty_logged_cards_time_ms.set(worker_i, time_ms); + } + + void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) { + _last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers); + } + void record_redirty_logged_cards_time_ms(double time_ms) { _recorded_redirty_logged_cards_time_ms = time_ms; } @@ -343,6 +359,10 @@ return _recorded_non_young_free_cset_time_ms; } + double fast_reclaim_humongous_time_ms() { + return _cur_fast_reclaim_humongous_time_ms; + } + double average_last_update_rs_time() { return _last_update_rs_times_ms.average(); } @@ -359,10 +379,6 @@ return _last_strong_code_root_scan_times_ms.average(); } - double average_last_strong_code_root_mark_time(){ - return _last_strong_code_root_mark_times_ms.average(); - } - double average_last_obj_copy_time() { return _last_obj_copy_times_ms.average(); } --- ./hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,13 +27,12 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1HotCardCache.hpp" #include "gc_implementation/g1/g1RemSet.hpp" -#include 
"gc_implementation/g1/heapRegion.hpp" #include "runtime/atomic.hpp" G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h): _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {} -void G1HotCardCache::initialize() { +void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) { if (default_use_cache()) { _use_cache = true; @@ -49,7 +48,7 @@ _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers); _hot_cache_par_claimed_idx = 0; - _card_counts.initialize(); + _card_counts.initialize(card_counts_storage); } } @@ -135,10 +134,6 @@ // above, are discarded prior to re-enabling the cache near the end of the GC. } -void G1HotCardCache::resize_card_counts(size_t heap_capacity) { - _card_counts.resize(heap_capacity); -} - void G1HotCardCache::reset_card_counts(HeapRegion* hr) { _card_counts.clear_region(hr); } --- ./hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -78,7 +78,7 @@ G1HotCardCache(G1CollectedHeap* g1h); ~G1HotCardCache(); - void initialize(); + void initialize(G1RegionToSpaceMapper* card_counts_storage); bool use_cache() { return _use_cache; } @@ -115,9 +115,6 @@ bool hot_cache_is_empty() { return _n_hot == 0; } - // Resizes the card counts table to match the given capacity - void resize_card_counts(size_t heap_capacity); - // Zeros the values in the card counts table for entire committed heap void reset_card_counts(); --- ./hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -123,20 +123,20 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace(" 1"); SharedHeap* sh = SharedHeap::heap(); - // Need cleared claim bits for the strong roots processing + // Need cleared claim bits for the roots processing ClassLoaderDataGraph::clear_claimed_marks(); - sh->process_strong_roots(true, // activate StrongRootsScope - false, // not scavenging. 
- SharedHeap::SO_SystemClasses, + MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations); + sh->process_strong_roots(true, // activate StrongRootsScope + SharedHeap::SO_None, &GenMarkSweep::follow_root_closure, - &GenMarkSweep::follow_code_root_closure, - &GenMarkSweep::follow_klass_closure); + &GenMarkSweep::follow_cld_closure, + &follow_code_closure); // Process reference objects found during marking ReferenceProcessor* rp = GenMarkSweep::ref_processor(); @@ -148,7 +148,8 @@ &GenMarkSweep::keep_alive, &GenMarkSweep::follow_stack_closure, NULL, - gc_timer()); + gc_timer(), + gc_tracer()->gc_id()); gc_tracer()->report_gc_reference_stats(stats); @@ -193,65 +194,6 @@ gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive); } -class G1PrepareCompactClosure: public HeapRegionClosure { - G1CollectedHeap* _g1h; - ModRefBarrierSet* _mrbs; - CompactPoint _cp; - HeapRegionSetCount _humongous_regions_removed; - - void free_humongous_region(HeapRegion* hr) { - HeapWord* end = hr->end(); - FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); - - assert(hr->startsHumongous(), - "Only the start of a humongous region should be freed."); - - hr->set_containing_set(NULL); - _humongous_regions_removed.increment(1u, hr->capacity()); - - _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */); - hr->prepare_for_compaction(&_cp); - // Also clear the part of the card table that will be unused after - // compaction. - _mrbs->clear(MemRegion(hr->compaction_top(), end)); - dummy_free_list.remove_all(); - } - -public: - G1PrepareCompactClosure(CompactibleSpace* cs) - : _g1h(G1CollectedHeap::heap()), - _mrbs(_g1h->g1_barrier_set()), - _cp(NULL, cs, cs->initialize_threshold()), - _humongous_regions_removed() { } - - void update_sets() { - // We'll recalculate total used bytes and recreate the free list - // at the end of the GC, so no point in updating those values here. - HeapRegionSetCount empty_set; - _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed); - } - - bool doHeapRegion(HeapRegion* hr) { - if (hr->isHumongous()) { - if (hr->startsHumongous()) { - oop obj = oop(hr->bottom()); - if (obj->is_gc_marked()) { - obj->forward_to(obj); - } else { - free_humongous_region(hr); - } - } else { - assert(hr->continuesHumongous(), "Invalid humongous."); - } - } else { - hr->prepare_for_compaction(&_cp); - // Also clear the part of the card table that will be unused after - // compaction. - _mrbs->clear(MemRegion(hr->compaction_top(), hr->end())); - } - return false; - } -}; void G1MarkSweep::mark_sweep_phase2() { // Now all live objects are marked, compute the new object addresses. @@ -260,21 +202,10 @@ // phase2, phase3 and phase4, but the ValidateMarkSweep live oops // tracking expects us to do so. See comment under phase4. 
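As the surrounding code shows, phase 2 computes each live object's post-compaction address: live humongous objects are pinned by forwarding them to themselves (obj->forward_to(obj)), while ordinary regions get consecutive destinations handed out by a running compaction cursor (the CompactPoint). A stripped-down sketch of that forwarding pass with illustrative types (HotSpot stores the forwardee in the object's mark word):

    #include <cstddef>
    #include <vector>

    struct LiveObject {
      size_t addr;        // current start address, in words for simplicity
      size_t size;        // size in words
      size_t forwardee;   // destination, filled in by the prepare pass
    };

    // Assign each live object the next free slot at the compaction cursor.
    // Objects already at their destination end up forwarding to themselves.
    void prepare_compaction(std::vector<LiveObject>& live, size_t bottom) {
      size_t compact_top = bottom;       // the "CompactPoint" cursor
      for (LiveObject& obj : live) {     // must be visited in address order
        obj.forwardee = compact_top;
        compact_top += obj.size;
      }
    }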
- G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("2"); - // find the first region - HeapRegion* r = g1h->region_at(0); - CompactibleSpace* sp = r; - if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) { - sp = r->next_compaction_space(); - } - - G1PrepareCompactClosure blk(sp); - g1h->heap_region_iterate(&blk); - blk.update_sets(); + prepare_compaction(); } class G1AdjustPointersClosure: public HeapRegionClosure { @@ -299,27 +230,27 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("3"); SharedHeap* sh = SharedHeap::heap(); - // Need cleared claim bits for the strong roots processing + // Need cleared claim bits for the roots processing ClassLoaderDataGraph::clear_claimed_marks(); - sh->process_strong_roots(true, // activate StrongRootsScope - false, // not scavenging. - SharedHeap::SO_AllClasses, - &GenMarkSweep::adjust_pointer_closure, - NULL, // do not touch code cache here - &GenMarkSweep::adjust_klass_closure); + CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations); + sh->process_all_roots(true, // activate StrongRootsScope + SharedHeap::SO_AllCodeCache, + &GenMarkSweep::adjust_pointer_closure, + &GenMarkSweep::adjust_cld_closure, + &adjust_code_closure); assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity"); g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure); + sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure); if (G1StringDedup::is_enabled()) { G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure); @@ -362,10 +293,75 @@ // to use a higher index (saved from phase2) when verifying perm_gen. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("4"); G1SpaceCompactClosure blk; g1h->heap_region_iterate(&blk); } + +void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + g1h->heap_region_iterate(blk); + blk->update_sets(); +} + +void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) { + HeapWord* end = hr->end(); + FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); + + assert(hr->startsHumongous(), + "Only the start of a humongous region should be freed."); + + hr->set_containing_set(NULL); + _humongous_regions_removed.increment(1u, hr->capacity()); + + _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */); + prepare_for_compaction(hr, end); + dummy_free_list.remove_all(); +} + +void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) { + // If this is the first live region that we came across which we can compact, + // initialize the CompactPoint. 
+ if (!is_cp_initialized()) { + _cp.space = hr; + _cp.threshold = hr->initialize_threshold(); + } + prepare_for_compaction_work(&_cp, hr, end); +} + +void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp, + HeapRegion* hr, + HeapWord* end) { + hr->prepare_for_compaction(cp); + // Also clear the part of the card table that will be unused after + // compaction. + _mrbs->clear(MemRegion(hr->compaction_top(), end)); +} + +void G1PrepareCompactClosure::update_sets() { + // We'll recalculate total used bytes and recreate the free list + // at the end of the GC, so no point in updating those values here. + HeapRegionSetCount empty_set; + _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed); +} + +bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) { + if (hr->isHumongous()) { + if (hr->startsHumongous()) { + oop obj = oop(hr->bottom()); + if (obj->is_gc_marked()) { + obj->forward_to(obj); + } else { + free_humongous_region(hr); + } + } else { + assert(hr->continuesHumongous(), "Invalid humongous."); + } + } else { + prepare_for_compaction(hr, hr->end()); + } + return false; +} --- ./hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -43,7 +43,7 @@ // compaction. // // Class unloading will only occur when a full gc is invoked. - +class G1PrepareCompactClosure; class G1MarkSweep : AllStatic { friend class VM_G1MarkSweep; @@ -70,6 +70,30 @@ static void mark_sweep_phase4(); static void allocate_stacks(); + static void prepare_compaction(); + static void prepare_compaction_work(G1PrepareCompactClosure* blk); +}; + +class G1PrepareCompactClosure : public HeapRegionClosure { + protected: + G1CollectedHeap* _g1h; + ModRefBarrierSet* _mrbs; + CompactPoint _cp; + HeapRegionSetCount _humongous_regions_removed; + + virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end); + void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end); + void free_humongous_region(HeapRegion* hr); + bool is_cp_initialized() const { return _cp.space != NULL; } + + public: + G1PrepareCompactClosure() : + _g1h(G1CollectedHeap::heap()), + _mrbs(_g1h->g1_barrier_set()), + _humongous_regions_removed() { } + + void update_sets(); + bool doHeapRegion(HeapRegion* hr); }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1MarkSweep.hpp" + +void G1MarkSweep::prepare_compaction() { + G1PrepareCompactClosure blk; + G1MarkSweep::prepare_compaction_work(&blk); +} --- ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,7 +25,28 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" +#include "gc_implementation/g1/g1ParScanThreadState.hpp" G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL), _cm(_g1->concurrent_mark()) {} + +G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) : + _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { } + +G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : + _g1(g1), _par_scan_state(NULL), + _worker_id(UINT_MAX) { + set_par_scan_thread_state(par_scan_state); +} + +void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) { + assert(_par_scan_state == NULL, "_par_scan_state must only be set once"); + assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL."); + + _par_scan_state = par_scan_state; + _worker_id = par_scan_state->queue_num(); + + assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u), + err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u))); +} --- ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,8 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP +#include "memory/iterator.hpp" + class HeapRegion; class G1CollectedHeap; class G1RemSet; @@ -51,8 +53,13 @@ G1ParScanThreadState* _par_scan_state; uint _worker_id; public: + // Initializes the instance, leaving _par_scan_state uninitialized. Must be done + // later using the set_par_scan_thread_state() method. 
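The two-phase construction being introduced here (build the closure with just the heap, attach the per-worker scan state later) is a set-once idiom: the setter asserts the field is still unset, so a closure can never silently change owners. A minimal sketch of the idiom with illustrative names:

    #include <cassert>
    #include <cstddef>

    class ScanState;   // per-worker state, attached after construction

    class ClosureBase {
      ScanState* _state;
    public:
      ClosureBase() : _state(nullptr) {}   // usable only after set_state()

      void set_state(ScanState* state) {
        assert(_state == nullptr && "state must only be set once");
        assert(state != nullptr && "state must be non-null");
        _state = state;
      }
      ScanState* state() const {
        assert(_state != nullptr && "state accessed before initialization");
        return _state;
      }
    };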
+ G1ParClosureSuper(G1CollectedHeap* g1); G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state); bool apply_to_weak_ref_discovered_field() { return true; } + + void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state); }; class G1ParPushHeapRSClosure : public G1ParClosureSuper { @@ -68,9 +75,8 @@ class G1ParScanClosure : public G1ParClosureSuper { public: - G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) : - G1ParClosureSuper(g1, par_scan_state) - { + G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) : + G1ParClosureSuper(g1) { assert(_ref_processor == NULL, "sanity"); _ref_processor = rp; } @@ -102,7 +108,7 @@ template <class T> void do_klass_barrier(T* p, oop new_obj); }; -template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> +template <G1Barrier barrier, G1Mark do_mark_object> class G1ParCopyClosure : public G1ParCopyHelper { private: template <class T> void do_oop_work(T* p); @@ -117,19 +123,19 @@ template <class T> void do_oop_nv(T* p) { do_oop_work(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + + G1CollectedHeap* g1() { return _g1; }; + G1ParScanThreadState* pss() { return _par_scan_state; } + ReferenceProcessor* rp() { return _ref_processor; }; }; -typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure; -typedef G1ParCopyClosure<false, G1BarrierKlass, false> G1ParScanMetadataClosure; - - -typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure; -typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure; - +typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure; +typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> G1ParScanAndMarkExtRootClosure; +typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure; // We use a separate closure to handle references during evacuation // failure processing. -typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure; +typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure; class FilterIntoCSClosure: public ExtendedOopClosure { G1CollectedHeap* _g1; @@ -160,10 +166,11 @@ }; // Closure for iterating over object fields during concurrent marking -class G1CMOopClosure : public ExtendedOopClosure { +class G1CMOopClosure : public MetadataAwareOopClosure { +protected: + ConcurrentMark* _cm; private: G1CollectedHeap* _g1h; - ConcurrentMark* _cm; CMTask* _task; public: G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task); @@ -173,7 +180,7 @@ }; // Closure to scan the root regions during concurrent marking -class G1RootRegionScanClosure : public ExtendedOopClosure { +class G1RootRegionScanClosure : public MetadataAwareOopClosure { private: G1CollectedHeap* _g1h; ConcurrentMark* _cm; --- ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,9 +28,12 @@ #include "gc_implementation/g1/concurrentMark.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1OopClosures.hpp" +#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" +#include "memory/iterator.inline.hpp" +#include "runtime/prefetch.inline.hpp" /* * This really ought to be an inline function, but apparently the C++ @@ -41,7 +44,7 @@ inline void FilterIntoCSClosure::do_oop_nv(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop) && - _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) { + 
_g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) { _oc->do_oop(p); } } @@ -64,7 +67,8 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_g1->in_cset_fast_test(obj)) { + G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj); + if (state == G1CollectedHeap::InCSet) { // We're not going to even bother checking whether the object is // already forwarded or not, as this usually causes an immediate // stall. We'll try to prefetch the object (for write, given that @@ -83,6 +87,9 @@ _par_scan_state->push_on_queue(p); } else { + if (state == G1CollectedHeap::IsHumongous) { + _g1->set_humongous_is_live(obj); + } _par_scan_state->update_rs(_from, p, _worker_id); } } @@ -94,22 +101,20 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_g1->in_cset_fast_test(obj)) { + if (_g1->is_in_cset_or_humongous(obj)) { Prefetch::write(obj->mark_addr(), 0); Prefetch::read(obj->mark_addr(), (HeapWordSize*2)); // Place on the references queue _par_scan_state->push_on_queue(p); + } else { + assert(!_g1->obj_in_cs(obj), "checking"); } } } template inline void G1CMOopClosure::do_oop_nv(T* p) { - assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); - assert(!_g1h->is_on_master_free_list( - _g1h->heap_region_containing((HeapWord*) p)), "invariant"); - oop obj = oopDesc::load_decode_heap_oop(p); if (_cm->verbose_high()) { gclog_or_tty->print_cr("[%u] we're looking at location " @@ -125,9 +130,7 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj); - if (hr != NULL) { - _cm->grayRoot(obj, obj->size(), _worker_id, hr); - } + _cm->grayRoot(obj, obj->size(), _worker_id, hr); } } @@ -154,57 +157,61 @@ template inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) { oop obj = oopDesc::load_decode_heap_oop(p); + if (obj == NULL) { + return; + } #ifdef ASSERT // can't do because of races // assert(obj == NULL || obj->is_oop(), "expected an oop"); // Do the safe subset of is_oop - if (obj != NULL) { #ifdef CHECK_UNHANDLED_OOPS - oopDesc* o = obj.obj(); + oopDesc* o = obj.obj(); #else - oopDesc* o = obj; + oopDesc* o = obj; #endif // CHECK_UNHANDLED_OOPS - assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); - assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); - } + assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); + assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); #endif // ASSERT assert(_from != NULL, "from region must be non-NULL"); assert(_from->is_in_reserved(p), "p is not in from"); HeapRegion* to = _g1->heap_region_containing(obj); - if (to != NULL && _from != to) { - // The _record_refs_into_cset flag is true during the RSet - // updating part of an evacuation pause. It is false at all - // other times: - // * rebuilding the rembered sets after a full GC - // * during concurrent refinement. - // * updating the remembered sets of regions in the collection - // set in the event of an evacuation failure (when deferred - // updates are enabled). + if (_from == to) { + // Normally this closure should only be called with cross-region references. + // But since Java threads are manipulating the references concurrently and we + // reload the values things may have changed. + return; + } + // The _record_refs_into_cset flag is true during the RSet + // updating part of an evacuation pause. 
It is false at all + // other times: + // * rebuilding the remembered sets after a full GC + // * during concurrent refinement. + // * updating the remembered sets of regions in the collection + // set in the event of an evacuation failure (when deferred + // updates are enabled). - if (_record_refs_into_cset && to->in_collection_set()) { - // We are recording references that point into the collection - // set and this particular reference does exactly that... - // If the referenced object has already been forwarded - // to itself, we are handling an evacuation failure and - // we have already visited/tried to copy this object - // there is no need to retry. - if (!self_forwarded(obj)) { - assert(_push_ref_cl != NULL, "should not be null"); - // Push the reference in the refs queue of the G1ParScanThreadState - // instance for this worker thread. - _push_ref_cl->do_oop(p); - } + if (_record_refs_into_cset && to->in_collection_set()) { + // We are recording references that point into the collection + // set and this particular reference does exactly that... + // If the referenced object has already been forwarded + // to itself, we are handling an evacuation failure and + // we have already visited/tried to copy this object + // there is no need to retry. + if (!self_forwarded(obj)) { + assert(_push_ref_cl != NULL, "should not be null"); + // Push the reference in the refs queue of the G1ParScanThreadState + // instance for this worker thread. + _push_ref_cl->do_oop(p); + } - // Deferred updates to the CSet are either discarded (in the normal case), - // or processed (if an evacuation failure occurs) at the end - // of the collection. - // See G1RemSet::cleanup_after_oops_into_collection_set_do(). - return; - } - + // Deferred updates to the CSet are either discarded (in the normal case), + // or processed (if an evacuation failure occurs) at the end + // of the collection. + // See G1RemSet::cleanup_after_oops_into_collection_set_do(). + } else { // We either don't care about pushing references that point into the // collection set (i.e. we're not during an evacuation pause) _or_ // the reference doesn't point into the collection set. Either way --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp" +#include "oops/markOop.hpp" +#include "oops/oop.inline.hpp" +#include "services/memTracker.hpp" +#ifdef TARGET_OS_FAMILY_linux +# include "os_linux.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_solaris +# include "os_solaris.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_windows +# include "os_windows.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_aix +# include "os_aix.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_bsd +# include "os_bsd.inline.hpp" +#endif +#include "utilities/bitMap.inline.hpp" + +G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL), + _high_boundary(NULL), _committed(), _page_size(0), _special(false), + _dirty(), _executable(false) { +} + +bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) { + if (!rs.is_reserved()) { + return false; // Allocation failed. + } + assert(_low_boundary == NULL, "VirtualSpace already initialized"); + assert(page_size > 0, "Granularity must be non-zero."); + + _low_boundary = rs.base(); + _high_boundary = _low_boundary + rs.size(); + + _special = rs.special(); + _executable = rs.executable(); + + _page_size = page_size; + + assert(_committed.size() == 0, "virtual space initialized more than once"); + uintx size_in_bits = rs.size() / page_size; + _committed.resize(size_in_bits, /* in_resource_area */ false); + if (_special) { + _dirty.resize(size_in_bits, /* in_resource_area */ false); + } + + return true; +} + + +G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() { + release(); +} + +void G1PageBasedVirtualSpace::release() { + // This does not release memory it never reserved. + // Caller must release via rs.release(); + _low_boundary = NULL; + _high_boundary = NULL; + _special = false; + _executable = false; + _page_size = 0; + _committed.resize(0, false); + _dirty.resize(0, false); +} + +size_t G1PageBasedVirtualSpace::committed_size() const { + return _committed.count_one_bits() * _page_size; +} + +size_t G1PageBasedVirtualSpace::reserved_size() const { + return pointer_delta(_high_boundary, _low_boundary, sizeof(char)); +} + +size_t G1PageBasedVirtualSpace::uncommitted_size() const { + return reserved_size() - committed_size(); +} + +uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const { + return (addr - _low_boundary) / _page_size; +} + +bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const { + uintptr_t end = start + size_in_pages; + return _committed.get_next_zero_offset(start, end) >= end; +} + +bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const { + uintptr_t end = start + size_in_pages; + return _committed.get_next_one_offset(start, end) >= end; +} + +char* G1PageBasedVirtualSpace::page_start(uintptr_t index) { + return _low_boundary + index * _page_size; +} + +size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) { + return num * _page_size; +} + +bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) { + // We need to make sure to commit all pages covered by the given area. + guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted"); + + bool zero_filled = true; + uintptr_t end = start + size_in_pages; + + if (_special) { + // Check for dirty pages and update zero_filled if any found. 
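+      // For a _special space the underlying memory was committed and pinned up
+      // front (e.g. when backed by large pages), so the OS will not re-zero it
+      // on a later commit. Pages recorded in _dirty have been uncommitted and
+      // may still hold stale contents; if any page in [start, end) is dirty,
+      // callers must not assume zero-filled memory.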
+ if (_dirty.get_next_one_offset(start,end) < end) { + zero_filled = false; + _dirty.clear_range(start, end); + } + } else { + os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable, + err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages)); + } + _committed.set_range(start, end); + + return zero_filled; +} + +void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) { + guarantee(is_area_committed(start, size_in_pages), "checking"); + + if (_special) { + // Mark that memory is dirty. If committed again the memory might + // need to be cleared explicitly. + _dirty.set_range(start, start + size_in_pages); + } else { + os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages)); + } + + _committed.clear_range(start, start + size_in_pages); +} + +bool G1PageBasedVirtualSpace::contains(const void* p) const { + return _low_boundary <= (const char*) p && (const char*) p < _high_boundary; +} + +#ifndef PRODUCT +void G1PageBasedVirtualSpace::print_on(outputStream* out) { + out->print ("Virtual space:"); + if (_special) out->print(" (pinned in memory)"); + out->cr(); + out->print_cr(" - committed: " SIZE_FORMAT, committed_size()); + out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size()); + out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary)); +} + +void G1PageBasedVirtualSpace::print() { + print_on(tty); +} +#endif --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP + +#include "memory/allocation.hpp" +#include "memory/memRegion.hpp" +#include "runtime/virtualspace.hpp" +#include "utilities/bitMap.hpp" + +// Virtual space management helper for a virtual space with an OS page allocation +// granularity. +// (De-)Allocation requests are always OS page aligned by passing a page index +// and multiples of pages. +// The implementation gives an error when trying to commit or uncommit pages that +// have already been committed or uncommitted. +class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + private: + // Reserved area addresses. 
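+  // [_low_boundary, _high_boundary) delimits the entire reserved backing
+  // store; commit() and uncommit() hand out page-granular slices of it.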
+ char* _low_boundary; + char* _high_boundary; + + // The commit/uncommit granularity in bytes. + size_t _page_size; + + // Bitmap used for verification of commit/uncommit operations. + BitMap _committed; + + // Bitmap used to keep track of which pages are dirty or not for _special + // spaces. This is needed because for those spaces the underlying memory + // will only be zero filled the first time it is committed. Calls to commit + // will use this bitmap and return whether or not the memory is zero filled. + BitMap _dirty; + + // Indicates that the entire space has been committed and pinned in memory, + // os::commit_memory() or os::uncommit_memory() have no function. + bool _special; + + // Indicates whether the committed space should be executable. + bool _executable; + + // Returns the index of the page which contains the given address. + uintptr_t addr_to_page_index(char* addr) const; + // Returns the address of the given page index. + char* page_start(uintptr_t index); + // Returns the byte size of the given number of pages. + size_t byte_size_for_pages(size_t num); + + // Returns true if the entire area is backed by committed memory. + bool is_area_committed(uintptr_t start, size_t size_in_pages) const; + // Returns true if the entire area is not backed by committed memory. + bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const; + + public: + + // Commit the given area of pages starting at start being size_in_pages large. + // Returns true if the given area is zero filled upon completion. + bool commit(uintptr_t start, size_t size_in_pages); + + // Uncommit the given area of pages starting at start being size_in_pages large. + void uncommit(uintptr_t start, size_t size_in_pages); + + // Initialization + G1PageBasedVirtualSpace(); + bool initialize_with_granularity(ReservedSpace rs, size_t page_size); + + // Destruction + ~G1PageBasedVirtualSpace(); + + // Amount of reserved memory. + size_t reserved_size() const; + // Memory used in this virtual space. + size_t committed_size() const; + // Memory left to use/expand in this virtual space. + size_t uncommitted_size() const; + + bool contains(const void* p) const; + + MemRegion reserved() { + MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize); + return x; + } + + void release(); + + void check_for_contiguity() PRODUCT_RETURN; + + // Debugging + void print_on(outputStream* out) PRODUCT_RETURN; + void print(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1OopClosures.inline.hpp" +#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" +#include "oops/oop.inline.hpp" +#include "oops/oop.pcgc.inline.hpp" +#include "runtime/prefetch.inline.hpp" + +G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp) + : _g1h(g1h), + _refs(g1h->task_queue(queue_num)), + _dcq(&g1h->dirty_card_queue_set()), + _ct_bs(g1h->g1_barrier_set()), + _g1_rem(g1h->g1_rem_set()), + _hash_seed(17), _queue_num(queue_num), + _term_attempts(0), + _age_table(false), _scanner(g1h, rp), + _strong_roots_time(0), _term_time(0) { + _scanner.set_par_scan_thread_state(this); + // we allocate G1YoungSurvRateNumRegions plus one entries, since + // we "sacrifice" entry 0 to keep track of surviving bytes for + // non-young regions (where the age is -1) + // We also add a few elements at the beginning and at the end in + // an attempt to eliminate cache contention + uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); + uint array_length = PADDING_ELEM_NUM + + real_length + + PADDING_ELEM_NUM; + _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); + if (_surviving_young_words_base == NULL) + vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, + "Not enough space for young surv histo."); + _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; + memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); + + _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h); + + _start = os::elapsedTime(); +} + +G1ParScanThreadState::~G1ParScanThreadState() { + _g1_par_allocator->retire_alloc_buffers(); + delete _g1_par_allocator; + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); +} + +void +G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) +{ + st->print_raw_cr("GC Termination Stats"); + st->print_raw_cr(" elapsed --strong roots-- -------termination-------" + " ------waste (KiB)------"); + st->print_raw_cr("thr ms ms % ms % attempts" + " total alloc undo"); + st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" + " ------- ------- -------"); +} + +void +G1ParScanThreadState::print_termination_stats(int i, + outputStream* const st) const +{ + const double elapsed_ms = elapsed_time() * 1000.0; + const double s_roots_ms = strong_roots_time() * 1000.0; + const double term_ms = term_time() * 1000.0; + const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste(); + const size_t undo_waste = _g1_par_allocator->undo_waste(); + st->print_cr("%3d %9.2f %9.2f %6.2f " + "%9.2f %6.2f " SIZE_FORMAT_W(8) " " + SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), + i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, + term_ms, term_ms * 100 / elapsed_ms, term_attempts(), + (alloc_buffer_waste + undo_waste) * HeapWordSize / K, + alloc_buffer_waste * HeapWordSize / K, + undo_waste * HeapWordSize / K); +} + +#ifdef ASSERT +bool 
G1ParScanThreadState::verify_ref(narrowOop* ref) const { + assert(ref != NULL, "invariant"); + assert(UseCompressedOops, "sanity"); + assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref))); + oop p = oopDesc::load_decode_heap_oop(ref); + assert(_g1h->is_in_g1_reserved(p), + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); + return true; +} + +bool G1ParScanThreadState::verify_ref(oop* ref) const { + assert(ref != NULL, "invariant"); + if (has_partial_array_mask(ref)) { + // Must be in the collection set--it's already been copied. + oop p = clear_partial_array_mask(ref); + assert(_g1h->obj_in_cs(p), + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); + } else { + oop p = oopDesc::load_decode_heap_oop(ref); + assert(_g1h->is_in_g1_reserved(p), + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p))); + } + return true; +} + +bool G1ParScanThreadState::verify_task(StarTask ref) const { + if (ref.is_narrow()) { + return verify_ref((narrowOop*) ref); + } else { + return verify_ref((oop*) ref); + } +} +#endif // ASSERT + +void G1ParScanThreadState::trim_queue() { + assert(_evac_failure_cl != NULL, "not set"); + + StarTask ref; + do { + // Drain the overflow stack first, so other threads can steal. + while (_refs->pop_overflow(ref)) { + dispatch_reference(ref); + } + + while (_refs->pop_local(ref)) { + dispatch_reference(ref); + } + } while (!_refs->is_empty()); +} + +oop G1ParScanThreadState::copy_to_survivor_space(oop const old) { + size_t word_sz = old->size(); + HeapRegion* from_region = _g1h->heap_region_containing_raw(old); + // +1 to make the -1 indexes valid... + int young_index = from_region->young_index_in_cset()+1; + assert( (from_region->is_young() && young_index > 0) || + (!from_region->is_young() && young_index == 0), "invariant" ); + G1CollectorPolicy* g1p = _g1h->g1_policy(); + markOop m = old->mark(); + int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() + : m->age(); + GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, + word_sz); + AllocationContext_t context = from_region->allocation_context(); + HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context); +#ifndef PRODUCT + // Should this evacuation fail? + if (_g1h->evacuation_should_fail()) { + if (obj_ptr != NULL) { + _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context); + obj_ptr = NULL; + } + } +#endif // !PRODUCT + + if (obj_ptr == NULL) { + // This will either forward-to-self, or detect that someone else has + // installed a forwarding pointer. + return _g1h->handle_evacuation_failure_par(this, old); + } + + oop obj = oop(obj_ptr); + + // We're going to allocate linearly, so might as well prefetch ahead. + Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); + + oop forward_ptr = old->forward_to_atomic(obj); + if (forward_ptr == NULL) { + Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); + + // alloc_purpose is just a hint to allocate() above, recheck the type of region + // we actually allocated from and update alloc_purpose accordingly + HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr); + alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured; + + if (g1p->track_object_age(alloc_purpose)) { + // We could simply do obj->incr_age(). However, this causes a + // performance issue. 
obj->incr_age() will first check whether + // the object has a displaced mark by checking its mark word; + // getting the mark word from the new location of the object + // stalls. So, given that we already have the mark word and we + // are about to install it anyway, it's better to increase the + // age on the mark word, when the object does not have a + // displaced mark word. We're not expecting many objects to have + // a displaced marked word, so that case is not optimized + // further (it could be...) and we simply call obj->incr_age(). + + if (m->has_displaced_mark_helper()) { + // in this case, we have to install the mark word first, + // otherwise obj looks to be forwarded (the old mark word, + // which contains the forward pointer, was copied) + obj->set_mark(m); + obj->incr_age(); + } else { + m = m->incr_age(); + obj->set_mark(m); + } + age_table()->add(obj, word_sz); + } else { + obj->set_mark(m); + } + + if (G1StringDedup::is_enabled()) { + G1StringDedup::enqueue_from_evacuation(from_region->is_young(), + to_region->is_young(), + queue_num(), + obj); + } + + size_t* surv_young_words = surviving_young_words(); + surv_young_words[young_index] += word_sz; + + if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { + // We keep track of the next start index in the length field of + // the to-space object. The actual length can be found in the + // length field of the from-space object. + arrayOop(obj)->set_length(0); + oop* old_p = set_partial_array_mask(old); + push_on_queue(old_p); + } else { + // No point in using the slower heap_region_containing() method, + // given that we know obj is in the heap. + _scanner.set_region(_g1h->heap_region_containing_raw(obj)); + obj->oop_iterate_backwards(&_scanner); + } + } else { + _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context); + obj = forward_ptr; + } + return obj; +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP + +#include "gc_implementation/g1/dirtyCardQueue.hpp" +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#include "gc_implementation/g1/g1CollectedHeap.hpp" +#include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1OopClosures.hpp" +#include "gc_implementation/g1/g1RemSet.hpp" +#include "gc_implementation/shared/ageTable.hpp" +#include "memory/allocation.hpp" +#include "oops/oop.hpp" + +class HeapRegion; +class outputStream; + +class G1ParScanThreadState : public StackObj { + private: + G1CollectedHeap* _g1h; + RefToScanQueue* _refs; + DirtyCardQueue _dcq; + G1SATBCardTableModRefBS* _ct_bs; + G1RemSet* _g1_rem; + + G1ParGCAllocator* _g1_par_allocator; + + ageTable _age_table; + + G1ParScanClosure _scanner; + + size_t _alloc_buffer_waste; + size_t _undo_waste; + + OopsInHeapRegionClosure* _evac_failure_cl; + + int _hash_seed; + uint _queue_num; + + size_t _term_attempts; + + double _start; + double _start_strong_roots; + double _strong_roots_time; + double _start_term; + double _term_time; + + // Map from young-age-index (0 == not young, 1 is youngest) to + // surviving words. base is what we get back from the malloc call + size_t* _surviving_young_words_base; + // this points into the array, as we use the first few entries for padding + size_t* _surviving_young_words; + +#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t)) + + void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } + void add_to_undo_waste(size_t waste) { _undo_waste += waste; } + + DirtyCardQueue& dirty_card_queue() { return _dcq; } + G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } + + public: + G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp); + ~G1ParScanThreadState(); + + ageTable* age_table() { return &_age_table; } + +#ifdef ASSERT + bool queue_is_empty() const { return _refs->is_empty(); } + + bool verify_ref(narrowOop* ref) const; + bool verify_ref(oop* ref) const; + bool verify_task(StarTask ref) const; +#endif // ASSERT + + template <class T> void push_on_queue(T* ref) { + assert(verify_ref(ref), "sanity"); + _refs->push(ref); + } + + template <class T> void update_rs(HeapRegion* from, T* p, int tid) { + // If the new value of the field points to the same region or + // is the to-space, we don't need to include it in the Rset updates. + if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { + size_t card_index = ctbs()->index_for(p); + // If the card hasn't been added to the buffer, do it.
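+      // (mark_card_deferred() is intended to act as a claim: it only returns
+      // true for the thread that first marks the card deferred, so a card that
+      // several workers update concurrently is enqueued at most once.)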
+ if (ctbs()->mark_card_deferred(card_index)) { + dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); + } + } + } + public: + + void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { + _evac_failure_cl = evac_failure_cl; + } + + OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; } + + int* hash_seed() { return &_hash_seed; } + uint queue_num() { return _queue_num; } + + size_t term_attempts() const { return _term_attempts; } + void note_term_attempt() { _term_attempts++; } + + void start_strong_roots() { + _start_strong_roots = os::elapsedTime(); + } + void end_strong_roots() { + _strong_roots_time += (os::elapsedTime() - _start_strong_roots); + } + double strong_roots_time() const { return _strong_roots_time; } + + void start_term_time() { + note_term_attempt(); + _start_term = os::elapsedTime(); + } + void end_term_time() { + _term_time += (os::elapsedTime() - _start_term); + } + double term_time() const { return _term_time; } + + double elapsed_time() const { + return os::elapsedTime() - _start; + } + + static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty); + void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const; + + size_t* surviving_young_words() { + // We add on to hide entry 0 which accumulates surviving words for + // age -1 regions (i.e. non-young ones) + return _surviving_young_words; + } + + private: + #define G1_PARTIAL_ARRAY_MASK 0x2 + + inline bool has_partial_array_mask(oop* ref) const { + return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK; + } + + // We never encode partial array oops as narrowOop*, so return false immediately. + // This allows the compiler to create optimized code when popping references from + // the work queue. + inline bool has_partial_array_mask(narrowOop* ref) const { + assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*"); + return false; + } + + // Only implement set_partial_array_mask() for regular oops, not for narrowOops. + // We always encode partial arrays as regular oop, to allow the + // specialization for has_partial_array_mask() for narrowOops above. + // This means that unintentional use of this method with narrowOops is caught + // by the compiler. + inline oop* set_partial_array_mask(oop obj) const { + assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); + return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK); + } + + inline oop clear_partial_array_mask(oop* ref) const { + return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); + } + + inline void do_oop_partial_array(oop* p); + + // This method is applied to the fields of the objects that have just been copied. + template <class T> inline void do_oop_evac(T* p, HeapRegion* from); + + template <class T> inline void deal_with_reference(T* ref_to_scan); + + inline void dispatch_reference(StarTask ref); + public: + + oop copy_to_survivor_space(oop const obj); + + void trim_queue(); + + inline void steal_and_trim_queue(RefToScanQueueSet *task_queues); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP + +#include "gc_implementation/g1/g1ParScanThreadState.hpp" +#include "gc_implementation/g1/g1RemSet.inline.hpp" +#include "oops/oop.inline.hpp" + +template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) { + assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)), + "Reference should not be NULL here as such are never pushed to the task queue."); + oop obj = oopDesc::load_decode_heap_oop_not_null(p); + + // Although we never intentionally push references outside of the collection + // set, due to (benign) races in the claim mechanism during RSet scanning more + // than one thread might claim the same card. So the same card may be + // processed multiple times. So redo this check. + G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj); + if (in_cset_state == G1CollectedHeap::InCSet) { + oop forwardee; + if (obj->is_forwarded()) { + forwardee = obj->forwardee(); + } else { + forwardee = copy_to_survivor_space(obj); + } + oopDesc::encode_store_heap_oop(p, forwardee); + } else if (in_cset_state == G1CollectedHeap::IsHumongous) { + _g1h->set_humongous_is_live(obj); + } else { + assert(in_cset_state == G1CollectedHeap::InNeither, + err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state)); + } + + assert(obj != NULL, "Must be"); + update_rs(from, p, queue_num()); +} + +inline void G1ParScanThreadState::do_oop_partial_array(oop* p) { + assert(has_partial_array_mask(p), "invariant"); + oop from_obj = clear_partial_array_mask(p); + + assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap."); + assert(from_obj->is_objArray(), "must be obj array"); + objArrayOop from_obj_array = objArrayOop(from_obj); + // The from-space object contains the real length. + int length = from_obj_array->length(); + + assert(from_obj->is_forwarded(), "must be forwarded"); + oop to_obj = from_obj->forwardee(); + assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); + objArrayOop to_obj_array = objArrayOop(to_obj); + // We keep track of the next start index in the length field of the + // to-space object. + int next_index = to_obj_array->length(); + assert(0 <= next_index && next_index < length, + err_msg("invariant, next index: %d, length: %d", next_index, length)); + + int start = next_index; + int end = length; + int remainder = end - start; + // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
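+  // That is, split off another ParGCArrayScanChunk-sized task only while more
+  // than 2 * ParGCArrayScanChunk elements remain; a smaller tail is cheaper to
+  // scan directly than to push, steal and re-decode as a separate task.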
+ if (remainder > 2 * ParGCArrayScanChunk) { + end = start + ParGCArrayScanChunk; + to_obj_array->set_length(end); + // Push the remainder before we process the range in case another + // worker has run out of things to do and can steal it. + oop* from_obj_p = set_partial_array_mask(from_obj); + push_on_queue(from_obj_p); + } else { + assert(length == end, "sanity"); + // We'll process the final range for this object. Restore the length + // so that the heap remains parsable in case of evacuation failure. + to_obj_array->set_length(end); + } + _scanner.set_region(_g1h->heap_region_containing_raw(to_obj)); + // Process indexes [start,end). It will also process the header + // along with the first chunk (i.e., the chunk with start == 0). + // Note that at this point the length field of to_obj_array is not + // correct given that we are using it to keep track of the next + // start index. oop_iterate_range() (thankfully!) ignores the length + // field and only relies on the start / end parameters. It does + // however return the size of the object which will be incorrect. So + // we have to ignore it even if we wanted to use it. + to_obj_array->oop_iterate_range(&_scanner, start, end); +} + +template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { + if (!has_partial_array_mask(ref_to_scan)) { + // Note: we can use "raw" versions of "region_containing" because + // "obj_to_scan" is definitely in the heap, and is not in a + // humongous region. + HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); + do_oop_evac(ref_to_scan, r); + } else { + do_oop_partial_array((oop*)ref_to_scan); + } +} + +inline void G1ParScanThreadState::dispatch_reference(StarTask ref) { + assert(verify_task(ref), "sanity"); + if (ref.is_narrow()) { + deal_with_reference((narrowOop*)ref); + } else { + deal_with_reference((oop*)ref); + } +} + +void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) { + StarTask stolen_task; + while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) { + assert(verify_task(stolen_task), "sanity"); + dispatch_reference(stolen_task); + + // We've just processed a reference and we might have made + // available new entries on the queues. So we have to make sure + // we drain the queues as necessary. + trim_queue(); + } +} + +#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */ + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1BiasedArray.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/virtualspace.hpp" +#include "services/memTracker.hpp" +#include "utilities/bitMap.inline.hpp" + +G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs, + size_t commit_granularity, + size_t region_granularity, + MemoryType type) : + _storage(), + _commit_granularity(commit_granularity), + _region_granularity(region_granularity), + _listener(NULL), + _commit_map() { + guarantee(is_power_of_2(commit_granularity), "must be"); + guarantee(is_power_of_2(region_granularity), "must be"); + _storage.initialize_with_granularity(rs, commit_granularity); + + MemTracker::record_virtual_memory_type((address)rs.base(), type); +} + +// G1RegionToSpaceMapper implementation where the region granularity is larger than +// or the same as the commit granularity. +// Basically, the space corresponding to one region spans several OS pages. +class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper { + private: + size_t _pages_per_region; + + public: + G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t alloc_granularity, + size_t commit_factor, + MemoryType type) : + G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) { + + guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity"); + _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions) { + bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region); + _commit_map.set_range(start_idx, start_idx + num_regions); + fire_on_commit(start_idx, num_regions, zero_filled); + } + + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) { + _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region); + _commit_map.clear_range(start_idx, start_idx + num_regions); + } +}; + +// G1RegionToSpaceMapper implementation where the region granularity is smaller +// than the commit granularity. +// Basically, the contents of one OS page span several regions.
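+// Each backing page keeps a refcount of the regions committed within it: the
+// page is committed to the OS when its refcount rises from 0 to 1 and
+// uncommitted when it drops back to 0. In outline (a sketch of the commit
+// loop body below, using its own names):
+//
+//   uintptr_t idx = region_idx_to_page_idx(i);   // region -> backing page
+//   uint old_refcount = _refcounts.get_by_index(idx);
+//   if (old_refcount == 0) {                     // first region on this page
+//     zero_filled = _storage.commit(idx, 1);
+//   }
+//   _refcounts.set_by_index(idx, old_refcount + 1);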
+class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper { + private: + class CommitRefcountArray : public G1BiasedMappedArray<uint> { + protected: + virtual uint default_value() const { return 0; } + }; + + size_t _regions_per_page; + + CommitRefcountArray _refcounts; + + uintptr_t region_idx_to_page_idx(uint region) const { + return region / _regions_per_page; + } + + public: + G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t alloc_granularity, + size_t commit_factor, + MemoryType type) : + G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() { + + guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity"); + _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity); + _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions) { + for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) { + assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i)); + uintptr_t idx = region_idx_to_page_idx(i); + uint old_refcount = _refcounts.get_by_index(idx); + bool zero_filled = false; + if (old_refcount == 0) { + zero_filled = _storage.commit(idx, 1); + } + _refcounts.set_by_index(idx, old_refcount + 1); + _commit_map.set_bit(i); + fire_on_commit(i, 1, zero_filled); + } + } + + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) { + for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) { + assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i)); + uintptr_t idx = region_idx_to_page_idx(i); + uint old_refcount = _refcounts.get_by_index(idx); + assert(old_refcount > 0, "must be"); + if (old_refcount == 1) { + _storage.uncommit(idx, 1); + } + _refcounts.set_by_index(idx, old_refcount - 1); + _commit_map.clear_bit(i); + } + } +}; + +void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) { + if (_listener != NULL) { + _listener->on_commit(start_idx, num_regions, zero_filled); + } +} + +G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t region_granularity, + size_t commit_factor, + MemoryType type) { + + if (region_granularity >= (os_commit_granularity * commit_factor)) { + return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + } else { + return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP + +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp" +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC { + public: + // Fired after commit of the memory, i.e. the memory this listener is registered + // for can be accessed. + // Zero_filled indicates that the memory can be considered as filled with zero bytes + // when called. + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) = 0; +}; + +// Maps region based commit/uncommit requests to the underlying page sized virtual +// space. +class G1RegionToSpaceMapper : public CHeapObj<mtGC> { + private: + G1MappingChangedListener* _listener; + protected: + // Backing storage. + G1PageBasedVirtualSpace _storage; + size_t _commit_granularity; + size_t _region_granularity; + // Mapping management + BitMap _commit_map; + + G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type); + + void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled); + public: + MemRegion reserved() { return _storage.reserved(); } + + void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; } + + virtual ~G1RegionToSpaceMapper() { + _commit_map.resize(0, /* in_resource_area */ false); + } + + bool is_committed(uintptr_t idx) const { + return _commit_map.at(idx); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0; + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0; + + // Creates an appropriate G1RegionToSpaceMapper for the given parameters. + // The byte_translation_factor defines how many bytes in a region correspond to + // a single byte in the data structure this mapper is for. + // Eg. in the card table, this value corresponds to the size a single card + // table entry corresponds to.
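+  // Illustrative only (the call site is not part of this hunk): a mapper
+  // backing the card table would be created with the OS page size, the heap
+  // region size, and the number of heap bytes each table byte describes, e.g.
+  //   create_mapper(rs, os::vm_page_size(), HeapRegion::GrainBytes,
+  //                 CardTableModRefBS::card_size, mtGC);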
+ static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t region_granularity, + size_t byte_translation_factor, + MemoryType type); +}; + +#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */ --- ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc_implementation/g1/bufferingOopClosure.hpp" #include "gc_implementation/g1/concurrentG1Refine.hpp" #include "gc_implementation/g1/concurrentG1RefineThread.hpp" #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" @@ -33,7 +32,7 @@ #include "gc_implementation/g1/g1GCPhaseTimes.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "memory/iterator.hpp" #include "oops/oop.inline.hpp" @@ -110,7 +109,7 @@ G1CollectedHeap* _g1h; OopsInHeapRegionClosure* _oc; - CodeBlobToOopClosure* _code_root_cl; + CodeBlobClosure* _code_root_cl; G1BlockOffsetSharedArray* _bot_shared; G1SATBCardTableModRefBS *_ct_bs; @@ -122,7 +121,7 @@ public: ScanRSClosure(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) : _oc(oc), _code_root_cl(code_root_cl), @@ -212,7 +211,6 @@ #endif HeapRegion* card_region = _g1h->heap_region_containing(card_start); - assert(card_region != NULL, "Yielding cards not in the heap?"); _cards++; if (!card_region->is_on_dirty_cards_region_list()) { @@ -243,7 +241,7 @@ }; void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) { double rs_time_start = os::elapsedTime(); HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i); @@ -322,7 +320,7 @@ } void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) { #if CARD_REPEAT_HISTO ct_freq_update_histo_and_reset(); @@ -340,12 +338,8 @@ // are just discarded (there's no need to update the RSets of regions // that were in the collection set - after the pause these regions // are wholly 'free' of live objects. In the event of an evacuation - // failure the cards/buffers in this queue set are: - // * passed to the DirtyCardQueueSet that is used to manage deferred - // RSet updates, or - // * scanned for references that point into the collection set - // and the RSet of the corresponding region in the collection set - // is updated immediately. + // failure the cards/buffers in this queue set are passed to the + // DirtyCardQueueSet that is used to manage RSet updates DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); assert((ParallelGCThreads > 0) || worker_i == 0, "invariant"); @@ -374,7 +368,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() { cleanupHRRS(); - ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); _g1->set_refine_cte_cl_concurrency(false); DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); dcqs.concatenate_logs(); @@ -387,67 +380,6 @@ _total_cards_scanned = 0; } - -// This closure, applied to a DirtyCardQueueSet, is used to immediately -// update the RSets for the regions in the CSet. 
For each card it iterates -// through the oops which coincide with that card. It scans the reference -// fields in each oop; when it finds an oop that points into the collection -// set, the RSet for the region containing the referenced object is updated. -class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure { - G1CollectedHeap* _g1; - CardTableModRefBS* _ct_bs; -public: - UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1, - CardTableModRefBS* bs): - _g1(g1), _ct_bs(bs) - { } - - bool do_card_ptr(jbyte* card_ptr, uint worker_i) { - // Construct the region representing the card. - HeapWord* start = _ct_bs->addr_for(card_ptr); - // And find the region containing it. - HeapRegion* r = _g1->heap_region_containing(start); - assert(r != NULL, "unexpected null"); - - // Scan oops in the card looking for references into the collection set - // Don't use addr_for(card_ptr + 1) which can ask for - // a card beyond the heap. This is not safe without a perm - // gen. - HeapWord* end = start + CardTableModRefBS::card_size_in_words; - MemRegion scanRegion(start, end); - - UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set()); - FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl); - FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl); - - // We can pass false as the "filter_young" parameter here as: - // * we should be in a STW pause, - // * the DCQS to which this closure is applied is used to hold - // references that point into the collection set from the prior - // RSet updating, - // * the post-write barrier shouldn't be logging updates to young - // regions (but there is a situation where this can happen - see - // the comment in G1RemSet::refine_card() below - - // that should not be applicable here), and - // * during actual RSet updating, the filtering of cards in young - // regions in HeapRegion::oops_on_card_seq_iterate_careful is - // employed. - // As a result, when this closure is applied to "refs into cset" - // DCQS, we shouldn't see any cards in young regions. - update_rs_cl.set_region(r); - HeapWord* stop_point = - r->oops_on_card_seq_iterate_careful(scanRegion, - &filter_then_update_rs_cset_oop_cl, - false /* filter_young */, - NULL /* card_ptr */); - - // Since this is performed in the event of an evacuation failure, we - // we shouldn't see a non-null stop point - assert(stop_point == NULL, "saw an unallocated region"); - return true; - } -}; - void G1RemSet::cleanup_after_oops_into_collection_set_do() { guarantee( _cards_scanned != NULL, "invariant" ); _total_cards_scanned = 0; @@ -468,25 +400,10 @@ double restore_remembered_set_start = os::elapsedTime(); // Restore remembered sets for the regions pointing into the collection set. 
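+ // With this change the deferred path is the only one: the "refs into cset"
+ // buffers are merged into the DirtyCardQueueSet below instead of being
+ // applied to the remembered sets immediately.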
- if (G1DeferredRSUpdate) { - // If deferred RS updates are enabled then we just need to transfer - // the completed buffers from (a) the DirtyCardQueueSet used to hold - // cards that contain references that point into the collection set - // to (b) the DCQS used to hold the deferred RS updates - _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs); - } else { - - CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set(); - UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs); - - int n_completed_buffers = 0; - while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate, - 0, 0, true)) { - n_completed_buffers++; - } - assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers"); - } - + // We just need to transfer the completed buffers from the DirtyCardQueueSet + // used to hold cards that contain references that point into the collection set + // to the DCQS used to hold the deferred RS updates. + _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs); _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0); } @@ -557,6 +474,12 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i, bool check_for_refs_into_cset) { + assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)), + err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap", + p2i(card_ptr), + _ct_bs->index_for(_ct_bs->addr_for(card_ptr)), + _ct_bs->addr_for(card_ptr), + _g1->addr_to_region(_ct_bs->addr_for(card_ptr)))); // If the card is no longer dirty, nothing to do. if (*card_ptr != CardTableModRefBS::dirty_card_val()) { @@ -569,11 +492,6 @@ HeapWord* start = _ct_bs->addr_for(card_ptr); // And find the region containing it. HeapRegion* r = _g1->heap_region_containing(start); - if (r == NULL) { - // Again no need to return that this card contains refs that - // point into the collection set. - return false; // Not in the G1 heap (might be in perm, for example.) - } // Why do we have to check here whether a card is on a young region, // given that we dirty young regions and, as a result, the @@ -626,10 +544,6 @@ start = _ct_bs->addr_for(card_ptr); r = _g1->heap_region_containing(start); - if (r == NULL) { - // Not in the G1 heap - return false; - } // Checking whether the region we got back from the cache // is young here is inappropriate. The region could have been --- ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,7 +96,7 @@ // the "i" passed to the calling thread's work(i) function. // In the sequential case this param will be ignored. 
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i); // Prepare for and cleanup after an oops_into_collection_set_do @@ -108,7 +108,7 @@ void cleanup_after_oops_into_collection_set_do(); void scanRS(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i); void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i); @@ -193,18 +193,4 @@ bool apply_to_weak_ref_discovered_field() { return true; } }; -class UpdateRSetImmediate: public OopsInHeapRegionClosure { -private: - G1RemSet* _g1_rem_set; - - template <class T> void do_oop_work(T* p); -public: - UpdateRSetImmediate(G1RemSet* rs) : - _g1_rem_set(rs) {} - - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } -}; - - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP #include "gc_implementation/g1/g1RemSet.hpp" +#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "oops/oop.inline.hpp" @@ -45,26 +46,28 @@ template <class T> inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) { oop obj = oopDesc::load_decode_heap_oop(p); + if (obj == NULL) { + return; + } + #ifdef ASSERT // can't do because of races // assert(obj == NULL || obj->is_oop(), "expected an oop"); // Do the safe subset of is_oop - if (obj != NULL) { #ifdef CHECK_UNHANDLED_OOPS - oopDesc* o = obj.obj(); + oopDesc* o = obj.obj(); #else - oopDesc* o = obj; + oopDesc* o = obj; #endif // CHECK_UNHANDLED_OOPS - assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); - assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); - } + assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); + assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); #endif // ASSERT assert(from == NULL || from->is_in_reserved(p), "p is not in from"); HeapRegion* to = _g1->heap_region_containing(obj); - if (to != NULL && from != to) { + if (from != to) { assert(to->rem_set() != NULL, "Need per-region 'into' remsets."); to->rem_set()->add_reference(p, tid); } @@ -76,13 +79,4 @@ _rs->par_write_ref(_from, p, _worker_i); } -template <class T> -inline void UpdateRSetImmediate::do_oop_work(T* p) { - assert(_from->is_in_reserved(p), "paranoia"); - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) { - _g1_rem_set->par_write_ref(_from, p, 0); - } -} - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -253,19 +253,22 @@ size_t occupied_cards = hrrs->occupied(); size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size(); if (code_root_mem_sz > max_code_root_mem_sz()) { + _max_code_root_mem_sz = code_root_mem_sz; _max_code_root_mem_sz_region = r; } size_t code_root_elems = hrrs->strong_code_roots_list_length(); RegionTypeCounter* current = NULL; - if (r->is_young()) { + if (r->is_free()) { + current = &_free; + } else if (r->is_young()) { current = &_young; } else if (r->isHumongous()) { current = 
&_humonguous; - } else if (r->is_empty()) { - current = &_free; + } else if (r->is_old()) { + current = &_old; } else { - current = &_old; + ShouldNotReachHere(); } current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems); _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems); --- ./hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,10 +23,12 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/satbQueue.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/thread.inline.hpp" G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap, @@ -36,7 +38,6 @@ _kind = G1SATBCT; } - void G1SATBCardTableModRefBS::enqueue(oop pre_val) { // Nulls should have been already filtered. assert(pre_val->is_oop(true), "Error"); @@ -64,6 +65,17 @@ } } +void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) { + if (!dest_uninitialized) { + write_ref_array_pre_work(dst, count); + } +} +void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) { + if (!dest_uninitialized) { + write_ref_array_pre_work(dst, count); + } +} + bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) { jbyte val = _byte_map[card_index]; // It's already processed @@ -112,13 +124,53 @@ } #endif +void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) { + // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter. 
+ MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords); + _card_table->clear(mr); +} + G1SATBCardTableLoggingModRefBS:: G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions) : G1SATBCardTableModRefBS(whole_heap, max_covered_regions), - _dcqs(JavaThread::dirty_card_queue_set()) + _dcqs(JavaThread::dirty_card_queue_set()), + _listener() { _kind = G1SATBCTLogging; + _listener.set_card_table(this); +} + +void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) { + mapper->set_mapping_changed_listener(&_listener); + + _byte_map_size = mapper->reserved().byte_size(); + + _guard_index = cards_required(_whole_heap.word_size()) - 1; + _last_valid_index = _guard_index - 1; + + HeapWord* low_bound = _whole_heap.start(); + HeapWord* high_bound = _whole_heap.end(); + + _cur_covered_regions = 1; + _covered[0] = _whole_heap; + + _byte_map = (jbyte*) mapper->reserved().start(); + byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); + assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); + + if (TraceCardTableModRefBS) { + gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: "); + gclog_or_tty->print_cr(" " + " &_byte_map[0]: " INTPTR_FORMAT + " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, + p2i(&_byte_map[0]), + p2i(&_byte_map[_last_valid_index])); + gclog_or_tty->print_cr(" " + " byte_map_base: " INTPTR_FORMAT, + p2i(byte_map_base)); + } } void --- ./hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,14 +25,14 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/memRegion.hpp" #include "oops/oop.inline.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS - class DirtyCardQueueSet; +class G1SATBCardTableLoggingModRefBS; // This barrier is specialized to use a logging barrier to support // snapshot-at-the-beginning marking. 
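The byte_map_base set up in initialize() above is the usual biased-base trick: subtract the shifted heap start from the map once, and the write barrier can then reach a card entry with a single shift-and-add per store. A small sketch of the arithmetic, with made-up constants, keeping the bias in an integer to sidestep the out-of-range intermediate pointer the real code tolerates:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int card_shift = 9;               // 512-byte cards
      static unsigned char byte_map[2048];    // covers 1 MiB of "heap"
      const uintptr_t low_bound = 0x40000000; // pretend heap start, card-aligned

      // One-time bias, as in initialize(): _byte_map - (low_bound >> card_shift).
      uintptr_t byte_map_base = (uintptr_t)byte_map - (low_bound >> card_shift);

      // Barrier-side lookup: shift and add only, no heap-base subtraction.
      uintptr_t addr = low_bound + 4096;      // some address in the heap
      unsigned char* card = (unsigned char*)(byte_map_base + (addr >> card_shift));

      assert(card == &byte_map[4096 >> card_shift]); // card #8 of this heap
      return 0;
    }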
@@ -86,16 +86,8 @@ } template <class T> void write_ref_array_pre_work(T* dst, int count); - virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) { - if (!dest_uninitialized) { - write_ref_array_pre_work(dst, count); - } - } - virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) { - if (!dest_uninitialized) { - write_ref_array_pre_work(dst, count); - } - } + virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized); + virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized); /* Claimed and deferred bits are used together in G1 during the evacuation @@ -134,18 +126,40 @@ jbyte val = _byte_map[card_index]; return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); } +}; +class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener { + private: + G1SATBCardTableLoggingModRefBS* _card_table; + public: + G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { } + + void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; } + + virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled); }; // Adds card-table logging to the post-barrier. // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet. class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS { + friend class G1SATBCardTableLoggingModRefBSChangedListener; private: + G1SATBCardTableLoggingModRefBSChangedListener _listener; DirtyCardQueueSet& _dcqs; public: + static size_t compute_size(size_t mem_region_size_in_words) { + size_t number_of_slots = (mem_region_size_in_words / card_size_in_words); + return ReservedSpace::allocation_align_size_up(number_of_slots); + } + G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions); + virtual void initialize() { } + virtual void initialize(G1RegionToSpaceMapper* mapper); + + virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); } + bool is_a(BarrierSet::Name bsn) { return bsn == BarrierSet::G1SATBCTLogging || G1SATBCardTableModRefBS::is_a(bsn); @@ -162,11 +176,6 @@ void write_region_work(MemRegion mr) { invalidate(mr); } void write_ref_array_work(MemRegion mr) { invalidate(mr); } - - }; - -#endif // INCLUDE_ALL_GCS - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -77,38 +77,37 @@ break; } - // Include this thread in safepoints - stsJoin(); + { + // Include thread in safepoints + SuspendibleThreadSetJoiner sts; - stat.mark_exec(); + stat.mark_exec(); - // Process the queue - for (;;) { - oop java_string = G1StringDedupQueue::pop(); - if (java_string == NULL) { - break; + // Process the queue + for (;;) { + oop java_string = G1StringDedupQueue::pop(); + if (java_string == NULL) { + break; + } + + G1StringDedupTable::deduplicate(java_string, stat); + + // Safepoint this thread if needed + if (sts.should_yield()) { + stat.mark_block(); + sts.yield(); + stat.mark_unblock(); + } } - G1StringDedupTable::deduplicate(java_string, stat); + G1StringDedupTable::trim_entry_cache(); - // Safepoint this thread if needed - if (stsShouldYield()) { - stat.mark_block(); - stsYield(NULL); - stat.mark_unblock(); - } + stat.mark_done(); + + // Print statistics + total_stat.add(stat); + print(gclog_or_tty, stat, 
total_stat); } - - G1StringDedupTable::trim_entry_cache(); - - stat.mark_done(); - - // Print statistics - total_stat.add(stat); - print(gclog_or_tty, stat, total_stat); - - // Exclude this thread from safepoints - stsLeave(); } terminate(); --- ./hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -108,9 +108,6 @@ develop(bool, G1RSBarrierRegionFilter, true, \ "If true, generate region filtering code in RS barrier") \ \ - develop(bool, G1DeferredRSUpdate, true, \ - "If true, use deferred RS updates") \ - \ develop(bool, G1RSLogCheckCardTable, false, \ "If true, verify that no dirty cards remain after RS log " \ "processing.") \ @@ -273,21 +270,24 @@ "Percentage (0-100) of the heap size to use as default " \ " maximum young gen size.") \ \ - experimental(uintx, G1MixedGCLiveThresholdPercent, 65, \ + experimental(uintx, G1MixedGCLiveThresholdPercent, 85, \ "Threshold for regions to be considered for inclusion in the " \ "collection set of mixed GCs. " \ "Regions with live bytes exceeding this will not be collected.") \ \ - product(uintx, G1HeapWastePercent, 10, \ + product(uintx, G1HeapWastePercent, 5, \ "Amount of space, expressed as a percentage of the heap size, " \ "that G1 is willing not to collect to avoid expensive GCs.") \ \ product(uintx, G1MixedGCCountTarget, 8, \ "The target number of mixed GCs after a marking cycle.") \ \ - experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \ - "The amount of code root chunks that should be kept at most " \ - "as percentage of already allocated.") \ + experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \ + "Try to reclaim dead large objects at every young GC.") \ + \ + experimental(bool, G1TraceReclaimDeadHumongousObjectsAtYoungGC, false, \ + "Print some information about large object liveness " \ + "at every young GC.") \ \ experimental(uintx, G1OldCSetRegionThresholdPercent, 10, \ "An upper bound for the number of old CSet regions expressed " \ @@ -325,11 +325,14 @@ "evacuation pauses") \ \ diagnostic(bool, G1VerifyRSetsDuringFullGC, false, \ - "If true, perform verification of each heap region's " \ - "remembered set when verifying the heap during a full GC.") \ + "If true, perform verification of each heap region's " \ + "remembered set when verifying the heap during a full GC.") \ \ diagnostic(bool, G1VerifyHeapRegionCodeRoots, false, \ - "Verify the code root lists attached to each heap region.") + "Verify the code root lists attached to each heap region.") \ + \ + develop(bool, G1VerifyBitmaps, false, \ + "Verifies the consistency of the marking bitmaps") G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) --- ./hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,14 +30,21 @@ // non-virtually, using a mechanism defined in this file. Extend these // macros in the obvious way to add specializations for new closures. -// Forward declarations. enum G1Barrier { G1BarrierNone, G1BarrierEvac, G1BarrierKlass }; -template <G1Barrier barrier, bool do_mark_object> +enum G1Mark { + G1MarkNone, + G1MarkFromRoot, + G1MarkPromotedFromRoot +}; + +// Forward declarations. 
+ +template <G1Barrier barrier, G1Mark do_mark_object> class G1ParCopyClosure; class G1ParScanClosure; --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,11 +28,15 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" +#include "gc_implementation/g1/heapRegionBounds.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" +#include "gc_implementation/shared/liveRange.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/iterator.hpp" +#include "memory/space.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/orderAccess.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -46,7 +50,7 @@ HeapRegion* hr, ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, FilterKind fk) : - ContiguousSpaceDCTOC(hr, cl, precision, NULL), + DirtyCardToOopClosure(hr, cl, precision, NULL), _hr(hr), _fk(fk), _g1(g1) { } FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, @@ -58,7 +62,7 @@ HeapRegion* hr, HeapWord* cur, HeapWord* top) { oop cur_oop = oop(cur); - int oop_size = cur_oop->size(); + size_t oop_size = hr->block_size(cur); HeapWord* next_obj = cur + oop_size; while (next_obj < top) { // Keep filtering the remembered set. @@ -69,25 +73,24 @@ } cur = next_obj; cur_oop = oop(cur); - oop_size = cur_oop->size(); + oop_size = hr->block_size(cur); next_obj = cur + oop_size; } return cur; } -void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, - HeapWord* top, - ExtendedOopClosure* cl) { +void HeapRegionDCTOC::walk_mem_region(MemRegion mr, + HeapWord* bottom, + HeapWord* top) { G1CollectedHeap* g1h = _g1; - int oop_size; + size_t oop_size; ExtendedOopClosure* cl2 = NULL; - FilterIntoCSClosure intoCSFilt(this, g1h, cl); - FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); + FilterIntoCSClosure intoCSFilt(this, g1h, _cl); + FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl); switch (_fk) { - case NoFilterKind: cl2 = cl; break; + case NoFilterKind: cl2 = _cl; break; case IntoCSFilterKind: cl2 = &intoCSFilt; break; case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; default: ShouldNotReachHere(); @@ -100,7 +103,7 @@ if (!g1h->is_obj_dead(oop(bottom), _hr)) { oop_size = oop(bottom)->oop_iterate(cl2, mr); } else { - oop_size = oop(bottom)->size(); + oop_size = _hr->block_size(bottom); } bottom += oop_size; @@ -109,17 +112,17 @@ // We replicate the loop below for several kinds of possible filters. switch (_fk) { case NoFilterKind: - bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); + bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top); break; case IntoCSFilterKind: { - FilterIntoCSClosure filt(this, g1h, cl); + FilterIntoCSClosure filt(this, g1h, _cl); bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; } case OutOfRegionFilterKind: { - FilterOutOfRegionClosure filt(_hr, cl); + FilterOutOfRegionClosure filt(_hr, _cl); bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; } @@ -135,32 +138,16 @@ } } -// Minimum region size; we won't go lower than that. -// We might want to decrease this in the future, to deal with small -// heaps a bit more efficiently. -#define MIN_REGION_SIZE ( 1024 * 1024 ) - -// Maximum region size; we don't go higher than that.
There's a good -// reason for having an upper bound. We don't want regions to get too -// large, otherwise cleanup's effectiveness would decrease as there -// will be fewer opportunities to find totally empty regions after -// marking. -#define MAX_REGION_SIZE ( 32 * 1024 * 1024 ) - -// The automatic region size calculation will try to have around this -// many regions in the heap (based on the min heap size). -#define TARGET_REGION_NUMBER 2048 - size_t HeapRegion::max_region_size() { - return (size_t)MAX_REGION_SIZE; + return HeapRegionBounds::max_size(); } void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) { uintx region_size = G1HeapRegionSize; if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { size_t average_heap_size = (initial_heap_size + max_heap_size) / 2; - region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER, - (uintx) MIN_REGION_SIZE); + region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(), + (uintx) HeapRegionBounds::min_size()); } int region_size_log = log2_long((jlong) region_size); @@ -170,10 +157,10 @@ region_size = ((uintx)1 << region_size_log); // Now make sure that we don't go over or under our limits. - if (region_size < MIN_REGION_SIZE) { - region_size = MIN_REGION_SIZE; - } else if (region_size > MAX_REGION_SIZE) { - region_size = MAX_REGION_SIZE; + if (region_size < HeapRegionBounds::min_size()) { + region_size = HeapRegionBounds::min_size(); + } else if (region_size > HeapRegionBounds::max_size()) { + region_size = HeapRegionBounds::max_size(); } // And recalculate the log. @@ -208,8 +195,6 @@ } void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) { - assert(_humongous_type == NotHumongous, - "we should have already filtered out humongous regions"); assert(_humongous_start_region == NULL, "we should have already filtered out humongous regions"); assert(_end == _orig_end, @@ -217,9 +202,10 @@ _in_collection_set = false; + set_allocation_context(AllocationContext::system()); set_young_index_in_cset(-1); uninstall_surv_rate_group(); - set_young_type(NotYoung); + set_free(); reset_pre_dummy_top(); if (!par) { @@ -270,7 +256,7 @@ assert(top() == bottom(), "should be empty"); assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); - _humongous_type = StartsHumongous; + _type.set_starts_humongous(); _humongous_start_region = this; set_end(new_end); @@ -284,11 +270,11 @@ assert(top() == bottom(), "should be empty"); assert(first_hr->startsHumongous(), "pre-condition"); - _humongous_type = ContinuesHumongous; + _type.set_continues_humongous(); _humongous_start_region = first_hr; } -void HeapRegion::set_notHumongous() { +void HeapRegion::clear_humongous() { assert(isHumongous(), "pre-condition"); if (startsHumongous()) { @@ -304,7 +290,6 @@ } assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); - _humongous_type = NotHumongous; _humongous_start_region = NULL; } @@ -319,46 +304,19 @@ return false; } -HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { - HeapWord* low = addr; - HeapWord* high = end(); - while (low < high) { - size_t diff = pointer_delta(high, low); - // Must add one below to bias toward the high amount. Otherwise, if - // "high" were at the desired value, and "low" were one less, we - // would not converge on "high". This is not symmetric, because - // we set "high" to a block start, which might be the right one, - // which we don't do for "low". 
- HeapWord* middle = low + (diff+1)/2; - if (middle == high) return high; - HeapWord* mid_bs = block_start_careful(middle); - if (mid_bs < addr) { - low = middle; - } else { - high = mid_bs; - } - } - assert(low == high && low >= addr, "Didn't work."); - return low; -} - -#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away -#pragma warning( disable:4355 ) // 'this' : used in base member initializer list -#endif // _MSC_VER - - -HeapRegion::HeapRegion(uint hrs_index, +HeapRegion::HeapRegion(uint hrm_index, G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : G1OffsetTableContigSpace(sharedOffsetArray, mr), - _hrs_index(hrs_index), - _humongous_type(NotHumongous), _humongous_start_region(NULL), + _hrm_index(hrm_index), + _allocation_context(AllocationContext::system()), + _humongous_start_region(NULL), _in_collection_set(false), _next_in_special_set(NULL), _orig_end(NULL), _claimed(InitialClaimValue), _evacuation_failed(false), _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), - _young_type(NotYoung), _next_young_region(NULL), - _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false), + _next_young_region(NULL), + _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), #ifdef ASSERT _containing_set(NULL), #endif // ASSERT @@ -367,55 +325,24 @@ _predicted_bytes_to_copy(0) { _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); + assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); + + initialize(mr); +} + +void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + assert(_rem_set->is_empty(), "Remembered set must be empty"); + + G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space); + _orig_end = mr.end(); - // Note that initialize() will set the start of the unmarked area of the - // region. hr_clear(false /*par*/, false /*clear_space*/); set_top(bottom()); - set_saved_mark(); - - assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); + record_top_and_timestamp(); } CompactibleSpace* HeapRegion::next_compaction_space() const { - // We're not using an iterator given that it will wrap around when - // it reaches the last region and this is not what we want here. 
- G1CollectedHeap* g1h = G1CollectedHeap::heap(); - uint index = hrs_index() + 1; - while (index < g1h->n_regions()) { - HeapRegion* hr = g1h->region_at(index); - if (!hr->isHumongous()) { - return hr; - } - index += 1; - } - return NULL; -} - -void HeapRegion::save_marks() { - set_saved_mark(); -} - -void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) { - HeapWord* p = mr.start(); - HeapWord* e = mr.end(); - oop obj; - while (p < e) { - obj = oop(p); - p += obj->oop_iterate(cl); - } - assert(p == e, "bad memregion: doesn't end on obj boundary"); -} - -#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ -void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ - ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ -} -SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) - - -void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) { - oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); + return G1CollectedHeap::heap()->next_compaction_region(this); } void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, @@ -423,7 +350,6 @@ // We always recreate the prev marking info and we'll explicitly // mark all objects we find to be self-forwarded on the prev // bitmap. So all objects need to be below PTAMS. - _prev_top_at_mark_start = top(); _prev_marked_bytes = 0; if (during_initial_mark) { @@ -447,6 +373,7 @@ assert(0 <= marked_bytes && marked_bytes <= used(), err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, marked_bytes, used())); + _prev_top_at_mark_start = top(); _prev_marked_bytes = marked_bytes; } @@ -477,7 +404,7 @@ if (cl->abort()) return cur; // The check above must occur before the operation below, since an // abort might invalidate the "size" operation. - cur += obj->size(); + cur += block_size(cur); } return NULL; } @@ -549,7 +476,7 @@ return cur; } // Otherwise... - next = (cur + obj->size()); + next = cur + block_size(cur); } // If we finish the above loop...We have a parseable object that @@ -557,10 +484,9 @@ // inside or spans the entire region. 
assert(obj == oop(cur), "sanity"); - assert(cur <= start && - obj->klass_or_null() != NULL && - (cur + obj->size()) > start, - "Loop postcondition"); + assert(cur <= start, "Loop postcondition"); + assert(obj->klass_or_null() != NULL, "Loop postcondition"); + assert((cur + block_size(cur)) > start, "Loop postcondition"); if (!g1h->is_obj_dead(obj)) { obj->oop_iterate(cl, mr); @@ -574,7 +500,7 @@ }; // Otherwise: - next = (cur + obj->size()); + next = cur + block_size(cur); if (!g1h->is_obj_dead(obj)) { if (next < end || !obj->is_objArray()) { @@ -600,21 +526,17 @@ hrrs->add_strong_code_root(nm); } +void HeapRegion::add_strong_code_root_locked(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); + HeapRegionRemSet* hrrs = rem_set(); + hrrs->add_strong_code_root_locked(nm); +} + void HeapRegion::remove_strong_code_root(nmethod* nm) { HeapRegionRemSet* hrrs = rem_set(); hrrs->remove_strong_code_root(nm); } -void HeapRegion::migrate_strong_code_roots() { - assert(in_collection_set(), "only collection set regions"); - assert(!isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to collection set", - HR_FORMAT_PARAMS(this))); - - HeapRegionRemSet* hrrs = rem_set(); - hrrs->migrate_strong_code_roots(); -} - void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { HeapRegionRemSet* hrrs = rem_set(); hrrs->strong_code_roots_do(blk); @@ -750,26 +672,12 @@ void HeapRegion::print() const { print_on(gclog_or_tty); } void HeapRegion::print_on(outputStream* st) const { - if (isHumongous()) { - if (startsHumongous()) - st->print(" HS"); - else - st->print(" HC"); - } else { - st->print(" "); - } + st->print("AC%4u", allocation_context()); + st->print(" %2s", get_short_type_str()); if (in_collection_set()) st->print(" CS"); else st->print(" "); - if (is_young()) - st->print(is_survivor() ? " SU" : " Y "); - else - st->print(" "); - if (is_empty()) - st->print(" F"); - else - st->print(" "); st->print(" TS %5d", _gc_time_stamp); st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT, prev_top_at_mark_start(), next_top_at_mark_start()); @@ -929,10 +837,11 @@ size_t object_num = 0; while (p < top()) { oop obj = oop(p); - size_t obj_size = obj->size(); + size_t obj_size = block_size(p); object_num += 1; - if (is_humongous != g1->isHumongous(obj_size)) { + if (is_humongous != g1->isHumongous(obj_size) && + !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects. gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size (" SIZE_FORMAT" words) in a %shumongous region", p, g1->isHumongous(obj_size) ? "" : "non-", @@ -942,8 +851,10 @@ } // If it returns false, verify_for_object() will output the - // appropriate messasge. - if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) { + // appropriate message. + if (do_bot_verify && + !g1->is_obj_dead(obj, this) && + !_offsets.verify_for_object(p, obj_size)) { *failures = true; return; } @@ -951,7 +862,10 @@ if (!g1->is_obj_dead_cond(obj, this, vo)) { if (obj->is_oop()) { Klass* klass = obj->klass(); - if (!klass->is_metaspace_object()) { + bool is_metaspace_object = Metaspace::contains(klass) || + (vo == VerifyOption_G1UsePrevMarking && + ClassLoaderDataGraph::unload_list_contains(klass)); + if (!is_metaspace_object) { gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " "not metadata", klass, (void *)obj); *failures = true; @@ -1065,9 +979,10 @@ // away eventually. 
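The loop postcondition above, now split into three separate asserts, is the invariant that matters for the careful iteration: after the walk, the current block starts at or before the query address and extends past it. A toy version of that forward walk, with made-up block sizes (the real code gets them from block_size(), which stays safe even when a dead object's klass has been unloaded):

    #include <cassert>
    #include <cstddef>

    int main() {
      // Toy "region": blocks laid end to end; sizes in words are illustrative.
      const size_t block_sizes[] = {3, 5, 2, 7};  // blocks [0,3) [3,8) [8,10) [10,17)
      const size_t start = 9;                     // address the block must cover

      size_t cur = 0;
      size_t i = 0;
      while (cur + block_sizes[i] <= start) {     // advance block by block
        cur += block_sizes[i];
        ++i;
      }
      // Same shape as the three asserts: cur <= start < cur + block_size(cur).
      assert(cur <= start);
      assert(cur + block_sizes[i] > start);
      assert(cur == 8 && block_sizes[i] == 2);    // block [8,10) covers offset 9
      return 0;
    }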
void G1OffsetTableContigSpace::clear(bool mangle_space) { - ContiguousSpace::clear(mangle_space); - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); + set_top(bottom()); + set_saved_mark_word(bottom()); + CompactibleSpace::clear(mangle_space); + reset_bot(); } void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { @@ -1100,13 +1015,16 @@ HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { G1CollectedHeap* g1h = G1CollectedHeap::heap(); assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); - if (_gc_time_stamp < g1h->get_gc_time_stamp()) - return top(); - else - return ContiguousSpace::saved_mark_word(); + HeapWord* local_top = top(); + OrderAccess::loadload(); + if (_gc_time_stamp < g1h->get_gc_time_stamp()) { + return local_top; + } else { + return Space::saved_mark_word(); + } } -void G1OffsetTableContigSpace::set_saved_mark() { +void G1OffsetTableContigSpace::record_top_and_timestamp() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp(); @@ -1118,7 +1036,7 @@ // of region. If it does so after _gc_time_stamp = ..., then it // will pick up the right saved_mark_word() as the high water mark // of the region. Either way, the behaviour will be correct. - ContiguousSpace::set_saved_mark(); + Space::set_saved_mark_word(top()); OrderAccess::storestore(); _gc_time_stamp = curr_gc_time_stamp; // No need to do another barrier to flush the writes above. If @@ -1129,6 +1047,26 @@ } } +void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) { + object_iterate(blk); +} + +void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) { + HeapWord* p = bottom(); + while (p < top()) { + if (block_is_obj(p)) { + blk->do_object(oop(p)); + } + p += block_size(p); + } +} + +#define block_is_always_obj(q) true +void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) { + SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size); +} +#undef block_is_always_obj + G1OffsetTableContigSpace:: G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : @@ -1137,8 +1075,11 @@ _gc_time_stamp(0) { _offsets.set_space(this); - // false ==> we'll do the clearing if there's clearing to be done. 
- ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle); - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); } + +void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + CompactibleSpace::initialize(mr, clear_space, mangle_space); + _top = bottom(); + reset_bot(); +} + --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,8 +25,10 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP -#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" +#include "gc_implementation/g1/g1AllocationContext.hpp" +#include "gc_implementation/g1/g1BlockOffsetTable.hpp" #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" +#include "gc_implementation/g1/heapRegionType.hpp" #include "gc_implementation/g1/survRateGroup.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" @@ -34,8 +36,6 @@ #include "memory/watermark.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS - // A HeapRegion is the smallest piece of a G1CollectedHeap that // can be collected independently. @@ -46,8 +46,6 @@ // The solution is to remove this method from the definition // of a Space. -class CompactibleSpace; -class ContiguousSpace; class HeapRegionRemSet; class HeapRegionRemSetIterator; class HeapRegion; @@ -56,22 +54,19 @@ #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" #define HR_FORMAT_PARAMS(_hr_) \ - (_hr_)->hrs_index(), \ - (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \ - (_hr_)->startsHumongous() ? "HS" : \ - (_hr_)->continuesHumongous() ? "HC" : \ - !(_hr_)->is_empty() ? "O" : "F", \ + (_hr_)->hrm_index(), \ + (_hr_)->get_short_type_str(), \ p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end()) -// sentinel value for hrs_index -#define G1_NULL_HRS_INDEX ((uint) -1) +// sentinel value for hrm_index +#define G1_NO_HRM_INDEX ((uint) -1) // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap // in the concurrent marker used by G1 to filter remembered // sets. -class HeapRegionDCTOC : public ContiguousSpaceDCTOC { +class HeapRegionDCTOC : public DirtyCardToOopClosure { public: // Specification of possible DirtyCardToOopClosure filtering. enum FilterKind { @@ -85,39 +80,13 @@ FilterKind _fk; G1CollectedHeap* _g1; - void walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, HeapWord* top, - ExtendedOopClosure* cl); - - // We don't specialize this for FilteringClosure; filtering is handled by - // the "FilterKind" mechanism. But we provide this to avoid a compiler - // warning. - void walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, HeapWord* top, - FilteringClosure* cl) { - HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top, - (ExtendedOopClosure*)cl); - } - - // Get the actual top of the area on which the closure will - // operate, given where the top is assumed to be (the end of the - // memory region passed to do_MemRegion) and where the object - // at the top is assumed to start. For example, an object may - // start at the top but actually extend past the assumed top, - // in which case the top becomes the end of the object. 
- HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) { - return ContiguousSpaceDCTOC::get_actual_top(top, top_obj); - } - // Walk the given memory region from bottom to (actual) top // looking for objects and applying the oop closure (_cl) to // them. The base implementation of this treats the area as // blocks, where a block may or may not be an object. Sub- // classes should override this to provide more accurate // or possibly more efficient walking. - void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) { - Filtering_DCTOC::walk_mem_region(mr, bottom, top); - } + void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); public: HeapRegionDCTOC(G1CollectedHeap* g1, @@ -151,9 +120,9 @@ // the regions anyway) and at the end of a Full GC. The current scheme // that uses sequential unsigned ints will fail only if we have 4b // evacuation pauses between two cleanups, which is _highly_ unlikely. - -class G1OffsetTableContigSpace: public ContiguousSpace { +class G1OffsetTableContigSpace: public CompactibleSpace { friend class VMStructs; + HeapWord* _top; protected: G1BlockOffsetArrayContigSpace _offsets; Mutex _par_alloc_lock; @@ -170,11 +139,35 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr); + void set_top(HeapWord* value) { _top = value; } + HeapWord* top() const { return _top; } + + protected: + // Reset the G1OffsetTableContigSpace. + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + + HeapWord** top_addr() { return &_top; } + // Allocation helpers (return NULL if full). + inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); + inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); + + public: + void reset_after_compaction() { set_top(compaction_top()); } + + size_t used() const { return byte_size(bottom(), top()); } + size_t free() const { return byte_size(top(), end()); } + bool is_free_block(const HeapWord* p) const { return p >= top(); } + + MemRegion used_region() const { return MemRegion(bottom(), top()); } + + void object_iterate(ObjectClosure* blk); + void safe_object_iterate(ObjectClosure* blk); + void set_bottom(HeapWord* value); void set_end(HeapWord* value); virtual HeapWord* saved_mark_word() const; - virtual void set_saved_mark(); + void record_top_and_timestamp(); void reset_gc_time_stamp() { _gc_time_stamp = 0; } unsigned get_gc_time_stamp() { return _gc_time_stamp; } @@ -194,6 +187,8 @@ HeapWord* block_start(const void* p); HeapWord* block_start_const(const void* p) const; + void prepare_for_compaction(CompactPoint* cp); + // Add offset table update. virtual HeapWord* allocate(size_t word_size); HeapWord* par_allocate(size_t word_size); @@ -205,12 +200,7 @@ virtual void print() const; void reset_bot() { - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); - } - - void update_bot_for_object(HeapWord* start, size_t word_size) { - _offsets.alloc_block(start, word_size); + _offsets.reset_bot(); } void print_bot_on(outputStream* out) { @@ -222,16 +212,6 @@ friend class VMStructs; private: - enum HumongousType { - NotHumongous = 0, - StartsHumongous, - ContinuesHumongous - }; - - // Requires that the region "mr" be dense with objects, and begin and end - // with an object. - void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl); - // The remembered set for this region. // (Might want to make this "inline" later, to avoid some alloc failure // issues.) 
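The allocate_impl()/par_allocate_impl() helpers declared here are defined in heapRegion.inline.hpp later in this patch; the parallel one is a standard CAS bump-pointer loop. A standalone sketch of that shape, using std::atomic in place of HotSpot's Atomic::cmpxchg_ptr:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    // CAS bump-pointer allocation, same shape as par_allocate_impl(): read top,
    // bounds-check, try to publish the new top, retry if another thread won.
    static char* par_allocate(std::atomic<char*>& top, char* end, size_t size) {
      for (;;) {
        char* obj = top.load();
        if ((size_t)(end - obj) < size) {
          return NULL;                        // region is full
        }
        char* new_top = obj + size;
        // On success the range obj..new_top is ours; on failure another
        // thread moved top, so loop and retry with the fresh value.
        if (top.compare_exchange_weak(obj, new_top)) {
          return obj;
        }
      }
    }

    int main() {
      static char region[1024];
      std::atomic<char*> top(region);
      char* a = par_allocate(top, region + 1024, 100);
      char* b = par_allocate(top, region + 1024, 100);
      assert(a == region && b == region + 100);
      assert(par_allocate(top, region + 1024, 900) == NULL); // does not fit
      return 0;
    }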
@@ -241,9 +221,12 @@ protected: // The index of this region in the heap region sequence. - uint _hrs_index; + uint _hrm_index; - HumongousType _humongous_type; + AllocationContext_t _allocation_context; + + HeapRegionType _type; + // For a humongous region, region in which it starts. HeapRegion* _humongous_start_region; // For the start region of a humongous sequence, it's original end(). @@ -256,11 +239,9 @@ bool _evacuation_failed; // A heap region may be a member one of a number of special subsets, each - // represented as linked lists through the field below. Currently, these - // sets include: + // represented as linked lists through the field below. Currently, there + // is only one set: // The collection set. - // The set of allocation regions used in a collection pause. - // Spaces that may contain gray objects. HeapRegion* _next_in_special_set; // next region in the young "generation" region set @@ -275,7 +256,6 @@ #ifdef ASSERT HeapRegionSetBase* _containing_set; #endif // ASSERT - bool _pending_removal; // For parallel heapRegion traversal. jint _claimed; @@ -288,13 +268,6 @@ // The calculated GC efficiency of the region. double _gc_efficiency; - enum YoungType { - NotYoung, // a region is not young - Young, // a region is young - Survivor // a region is young and it contains survivors - }; - - volatile YoungType _young_type; int _young_index_in_cset; SurvRateGroup* _surv_rate_group; int _age_index; @@ -319,12 +292,6 @@ _next_top_at_mark_start = bot; } - void set_young_type(YoungType new_type) { - //assert(_young_type != new_type, "setting the same type" ); - // TODO: add more assertions here - _young_type = new_type; - } - // Cached attributes used in the collection set policy information // The RSet length that was added to the total value @@ -340,10 +307,16 @@ size_t _predicted_bytes_to_copy; public: - HeapRegion(uint hrs_index, + HeapRegion(uint hrm_index, G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr); + // Initializing the HeapRegion not only resets the data structure, but also + // resets the BOT for that heap region. + // The default values for clear_space means that we will do the clearing if + // there's clearing to be done ourselves. We also always mangle the space. + virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle); + static int LogOfHRGrainBytes; static int LogOfHRGrainWords; @@ -379,18 +352,19 @@ ParMarkRootClaimValue = 9 }; - inline HeapWord* par_allocate_no_bot_updates(size_t word_size) { - assert(is_young(), "we can only skip BOT updates on young regions"); - return ContiguousSpace::par_allocate(word_size); - } - inline HeapWord* allocate_no_bot_updates(size_t word_size) { - assert(is_young(), "we can only skip BOT updates on young regions"); - return ContiguousSpace::allocate(word_size); - } + // All allocated blocks are occupied by objects in a HeapRegion + bool block_is_obj(const HeapWord* p) const; - // If this region is a member of a HeapRegionSeq, the index in that + // Returns the object size for all valid block starts + // and the amount of unallocated words if called on top() + size_t block_size(const HeapWord* p) const; + + inline HeapWord* par_allocate_no_bot_updates(size_t word_size); + inline HeapWord* allocate_no_bot_updates(size_t word_size); + + // If this region is a member of a HeapRegionManager, the index in that // sequence, otherwise -1. 
- uint hrs_index() const { return _hrs_index; } + uint hrm_index() const { return _hrm_index; } // The number of bytes marked live in the region in the last marking phase. size_t marked_bytes() { return _prev_marked_bytes; } @@ -437,9 +411,21 @@ _prev_marked_bytes = _next_marked_bytes = 0; } - bool isHumongous() const { return _humongous_type != NotHumongous; } - bool startsHumongous() const { return _humongous_type == StartsHumongous; } - bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; } + const char* get_type_str() const { return _type.get_str(); } + const char* get_short_type_str() const { return _type.get_short_str(); } + + bool is_free() const { return _type.is_free(); } + + bool is_young() const { return _type.is_young(); } + bool is_eden() const { return _type.is_eden(); } + bool is_survivor() const { return _type.is_survivor(); } + + bool isHumongous() const { return _type.is_humongous(); } + bool startsHumongous() const { return _type.is_starts_humongous(); } + bool continuesHumongous() const { return _type.is_continues_humongous(); } + + bool is_old() const { return _type.is_old(); } + // For a humongous region, region in which it starts. HeapRegion* humongous_start_region() const { return _humongous_start_region; @@ -461,7 +447,7 @@ // with this HS region. uint last_hc_index() const { assert(startsHumongous(), "don't call this otherwise"); - return hrs_index() + region_num(); + return hrm_index() + region_num(); } // Same as Space::is_in_reserved, but will use the original size of the region. @@ -503,7 +489,7 @@ void set_continuesHumongous(HeapRegion* first_hr); // Unsets the humongous-related fields on the region. - void set_notHumongous(); + void clear_humongous(); // If the region has a remembered set, return a pointer to it. HeapRegionRemSet* rem_set() const { @@ -530,6 +516,14 @@ _next_in_special_set = r; } + void set_allocation_context(AllocationContext_t context) { + _allocation_context = context; + } + + AllocationContext_t allocation_context() const { + return _allocation_context; + } + // Methods used by the HeapRegionSetBase class and subclasses. // Getter and setter for the next and prev fields used to link regions into @@ -563,26 +557,6 @@ // to provide a dummy version of it. #endif // ASSERT - // If we want to remove regions from a list in bulk we can simply tag - // them with the pending_removal tag and call the - // remove_all_pending() method on the list. - - bool pending_removal() { return _pending_removal; } - - void set_pending_removal(bool pending_removal) { - if (pending_removal) { - assert(!_pending_removal && containing_set() != NULL, - "can only set pending removal to true if it's false and " - "the region belongs to a region set"); - } else { - assert( _pending_removal && containing_set() == NULL, - "can only set pending removal to false if it's true and " - "the region does not belong to a region set"); - } - - _pending_removal = pending_removal; - } - HeapRegion* get_next_young_region() { return _next_young_region; } void set_next_young_region(HeapRegion* hr) { _next_young_region = hr; @@ -593,10 +567,7 @@ void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; } bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; } - HeapWord* orig_end() { return _orig_end; } - - // Allows logical separation between objects allocated before and after. - void save_marks(); + HeapWord* orig_end() const { return _orig_end; } // Reset HR stuff to default values. 
void hr_clear(bool par, bool clear_space, bool locked = false); @@ -606,10 +577,6 @@ HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; } HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; } - // Apply "cl->do_oop" to (the addresses of) all reference fields in objects - // allocated in the current region before the last call to "save_mark". - void oop_before_save_marks_iterate(ExtendedOopClosure* cl); - // Note the start or end of marking. This tells the heap region // that the collector is about to start or has finished (concurrently) // marking the heap. @@ -657,9 +624,6 @@ void calc_gc_efficiency(void); double gc_efficiency() { return _gc_efficiency;} - bool is_young() const { return _young_type != NotYoung; } - bool is_survivor() const { return _young_type == Survivor; } - int young_index_in_cset() const { return _young_index_in_cset; } void set_young_index_in_cset(int index) { assert( (index == -1) || is_young(), "pre-condition" ); @@ -711,11 +675,13 @@ } } - void set_young() { set_young_type(Young); } + void set_free() { _type.set_free(); } - void set_survivor() { set_young_type(Survivor); } + void set_eden() { _type.set_eden(); } + void set_eden_pre_gc() { _type.set_eden_pre_gc(); } + void set_survivor() { _type.set_survivor(); } - void set_not_young() { set_young_type(NotYoung); } + void set_old() { _type.set_old(); } // Determine if an object has been allocated since the last // mark performed by the collector. This returns true iff the object @@ -767,18 +733,6 @@ bool filter_young, jbyte* card_ptr); - // A version of block start that is guaranteed to find *some* block - // boundary at or before "p", but does not object iteration, and may - // therefore be used safely when the heap is unparseable. - HeapWord* block_start_careful(const void* p) const { - return _offsets.block_start_careful(p); - } - - // Requires that "addr" is within the region. Returns the start of the - // first ("careful") block that starts at or after "addr", or else the - // "end" of the region if there is no such block. - HeapWord* next_block_start_careful(HeapWord* addr); - size_t recorded_rs_length() const { return _recorded_rs_length; } double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; } size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; } @@ -795,10 +749,6 @@ _predicted_bytes_to_copy = bytes; } -#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) - virtual CompactibleSpace* next_compaction_space() const; virtual void reset_after_compaction(); @@ -806,14 +756,9 @@ // Routines for managing a list of code roots (attached to the // this region's RSet) that point into this heap region. void add_strong_code_root(nmethod* nm); + void add_strong_code_root_locked(nmethod* nm); void remove_strong_code_root(nmethod* nm); - // During a collection, migrate the successfully evacuated - // strong code roots that referenced into this region to the - // new regions that they now point into. Unsuccessfully - // evacuated code roots are not migrated. - void migrate_strong_code_roots(); - // Applies blk->do_code_blob() to each of the entries in // the strong code roots list for this region void strong_code_roots_do(CodeBlobClosure* blk) const; @@ -847,7 +792,7 @@ // HeapRegionClosure is used for iterating over regions. 
// Terminates the iteration when the "doHeapRegion" method returns "true". class HeapRegionClosure : public StackObj { - friend class HeapRegionSeq; + friend class HeapRegionManager; friend class G1CollectedHeap; bool _complete; @@ -864,6 +809,4 @@ bool complete() { return _complete; } }; -#endif // INCLUDE_ALL_GCS - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,8 +25,49 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP +#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" +#include "gc_implementation/g1/g1CollectedHeap.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "memory/space.hpp" +#include "runtime/atomic.inline.hpp" + +// This version requires locking. +inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size, + HeapWord* const end_value) { + HeapWord* obj = top(); + if (pointer_delta(end_value, obj) >= size) { + HeapWord* new_top = obj + size; + set_top(new_top); + assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + return obj; + } else { + return NULL; + } +} + +// This version is lock-free. +inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size, + HeapWord* const end_value) { + do { + HeapWord* obj = top(); + if (pointer_delta(end_value, obj) >= size) { + HeapWord* new_top = obj + size; + HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); + // result can be one of two: + // the old top value: the exchange succeeded + // otherwise: the new value of the top is returned. + if (result == obj) { + assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + return obj; + } + } else { + return NULL; + } + } while (true); +} + inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) { - HeapWord* res = ContiguousSpace::allocate(size); + HeapWord* res = allocate_impl(size, end()); if (res != NULL) { _offsets.alloc_block(res, size); } @@ -38,12 +79,7 @@ // this is used for larger LAB allocations only. inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) { MutexLocker x(&_par_alloc_lock); - // Given that we take the lock no need to use par_allocate() here. - HeapWord* res = ContiguousSpace::allocate(size); - if (res != NULL) { - _offsets.alloc_block(res, size); - } - return res; + return allocate(size); } inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) { @@ -55,6 +91,52 @@ return _offsets.block_start_const(p); } +inline bool +HeapRegion::block_is_obj(const HeapWord* p) const { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + if (ClassUnloadingWithConcurrentMark) { + return !g1h->is_obj_dead(oop(p), this); + } + return p < top(); +} + +inline size_t +HeapRegion::block_size(const HeapWord *addr) const { + if (addr == top()) { + return pointer_delta(end(), addr); + } + + if (block_is_obj(addr)) { + return oop(addr)->size(); + } + + assert(ClassUnloadingWithConcurrentMark, + err_msg("All blocks should be objects if G1 Class Unloading isn't used. 
" + "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") " + "addr: " PTR_FORMAT, + p2i(bottom()), p2i(top()), p2i(end()), p2i(addr))); + + // Old regions' dead objects may have dead classes + // We need to find the next live object in some other + // manner than getting the oop size + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()-> + getNextMarkedWordAddress(addr, prev_top_at_mark_start()); + + assert(next > addr, "must get the next live object"); + return pointer_delta(next, addr); +} + +inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) { + assert(is_young(), "we can only skip BOT updates on young regions"); + return par_allocate_impl(word_size, end()); +} + +inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) { + assert(is_young(), "we can only skip BOT updates on young regions"); + return allocate_impl(word_size, end()); +} + inline void HeapRegion::note_start_of_marking() { _next_marked_bytes = 0; _next_top_at_mark_start = top(); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP + +class HeapRegionBounds : public AllStatic { +private: + // Minimum region size; we won't go lower than that. + // We might want to decrease this in the future, to deal with small + // heaps a bit more efficiently. + static const size_t MIN_REGION_SIZE = 1024 * 1024; + + // Maximum region size; we don't go higher than that. There's a good + // reason for having an upper bound. We don't want regions to get too + // large, otherwise cleanup's effectiveness would decrease as there + // will be fewer opportunities to find totally empty regions after + // marking. + static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024; + + // The automatic region size calculation will try to have around this + // many regions in the heap (based on the min heap size). 
+ static const size_t TARGET_REGION_NUMBER = 2048; + +public: + static inline size_t min_size(); + static inline size_t max_size(); + static inline size_t target_number(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "gc_implementation/g1/heapRegionBounds.hpp" + +size_t HeapRegionBounds::min_size() { + return MIN_REGION_SIZE; +} + +size_t HeapRegionBounds::max_size() { + return MAX_REGION_SIZE; +} + +size_t HeapRegionBounds::target_number() { + return TARGET_REGION_NUMBER; +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/concurrentG1Refine.hpp" +#include "memory/allocation.hpp" + +void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, + G1RegionToSpaceMapper* prev_bitmap, + G1RegionToSpaceMapper* next_bitmap, + G1RegionToSpaceMapper* bot, + G1RegionToSpaceMapper* cardtable, + G1RegionToSpaceMapper* card_counts) { + _allocated_heapregions_length = 0; + + _heap_mapper = heap_storage; + + _prev_bitmap_mapper = prev_bitmap; + _next_bitmap_mapper = next_bitmap; + + _bot_mapper = bot; + _cardtable_mapper = cardtable; + + _card_counts_mapper = card_counts; + + MemRegion reserved = heap_storage->reserved(); + _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes); + + _available_map.resize(_regions.length(), false); + _available_map.clear(); +} + +bool HeapRegionManager::is_available(uint region) const { + return _available_map.at(region); +} + +#ifdef ASSERT +bool HeapRegionManager::is_free(HeapRegion* hr) const { + return _free_list.contains(hr); +} +#endif + +HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index); + MemRegion mr(bottom, bottom + HeapRegion::GrainWords); + assert(reserved().contains(mr), "invariant"); + return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr); +} + +void HeapRegionManager::commit_regions(uint index, size_t num_regions) { + guarantee(num_regions > 0, "Must commit more than zero regions"); + guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions"); + + _num_committed += (uint)num_regions; + + _heap_mapper->commit_regions(index, num_regions); + + // Also commit auxiliary data + _prev_bitmap_mapper->commit_regions(index, num_regions); + _next_bitmap_mapper->commit_regions(index, num_regions); + + _bot_mapper->commit_regions(index, num_regions); + _cardtable_mapper->commit_regions(index, num_regions); + + _card_counts_mapper->commit_regions(index, num_regions); +} + +void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) { + guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start)); + guarantee(_num_committed >= num_regions, "pre-condition"); + + // Print before uncommitting. 
+ if (G1CollectedHeap::heap()->hr_printer()->is_active()) { + for (uint i = start; i < start + num_regions; i++) { + HeapRegion* hr = at(i); + G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end()); + } + } + + _num_committed -= (uint)num_regions; + + _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range); + _heap_mapper->uncommit_regions(start, num_regions); + + // Also uncommit auxiliary data + _prev_bitmap_mapper->uncommit_regions(start, num_regions); + _next_bitmap_mapper->uncommit_regions(start, num_regions); + + _bot_mapper->uncommit_regions(start, num_regions); + _cardtable_mapper->uncommit_regions(start, num_regions); + + _card_counts_mapper->uncommit_regions(start, num_regions); +} + +void HeapRegionManager::make_regions_available(uint start, uint num_regions) { + guarantee(num_regions > 0, "No point in calling this for zero regions"); + commit_regions(start, num_regions); + for (uint i = start; i < start + num_regions; i++) { + if (_regions.get_by_index(i) == NULL) { + HeapRegion* new_hr = new_heap_region(i); + _regions.set_by_index(i, new_hr); + _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1); + } + } + + _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range); + + for (uint i = start; i < start + num_regions; i++) { + assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i)); + HeapRegion* hr = at(i); + if (G1CollectedHeap::heap()->hr_printer()->is_active()) { + G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end()); + } + HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i); + MemRegion mr(bottom, bottom + HeapRegion::GrainWords); + + hr->initialize(mr); + insert_into_free_list(at(i)); + } +} + +uint HeapRegionManager::expand_by(uint num_regions) { + return expand_at(0, num_regions); +} + +uint HeapRegionManager::expand_at(uint start, uint num_regions) { + if (num_regions == 0) { + return 0; + } + + uint cur = start; + uint idx_last_found = 0; + uint num_last_found = 0; + + uint expanded = 0; + + while (expanded < num_regions && + (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) { + uint to_expand = MIN2(num_regions - expanded, num_last_found); + make_regions_available(idx_last_found, to_expand); + expanded += to_expand; + cur = idx_last_found + num_last_found + 1; + } + + verify_optional(); + return expanded; +} + +uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) { + uint found = 0; + size_t length_found = 0; + uint cur = 0; + + while (length_found < num && cur < max_length()) { + HeapRegion* hr = _regions.get_by_index(cur); + if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) { + // This region is a potential candidate for allocation into. + length_found++; + } else { + // This region is not a candidate. The next region is the next possible one. + found = cur + 1; + length_found = 0; + } + cur++; + } + + if (length_found == num) { + for (uint i = found; i < (found + num); i++) { + HeapRegion* hr = _regions.get_by_index(i); + // sanity check + guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()), + err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT + " that is not empty at " UINT32_FORMAT ". 
Hr is " PTR_FORMAT, found, num, i, p2i(hr))); + } + return found; + } else { + return G1_NO_HRM_INDEX; + } +} + +HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const { + guarantee(r != NULL, "Start region must be a valid region"); + guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index())); + for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) { + HeapRegion* hr = _regions.get_by_index(i); + if (is_available(i)) { + return hr; + } + } + return NULL; +} + +void HeapRegionManager::iterate(HeapRegionClosure* blk) const { + uint len = max_length(); + + for (uint i = 0; i < len; i++) { + if (!is_available(i)) { + continue; + } + guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i)); + bool res = blk->doHeapRegion(at(i)); + if (res) { + blk->incomplete(); + return; + } + } +} + +uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const { + guarantee(res_idx != NULL, "checking"); + guarantee(start_idx <= (max_length() + 1), "checking"); + + uint num_regions = 0; + + uint cur = start_idx; + while (cur < max_length() && is_available(cur)) { + cur++; + } + if (cur == max_length()) { + return num_regions; + } + *res_idx = cur; + while (cur < max_length() && !is_available(cur)) { + cur++; + } + num_regions = cur - *res_idx; +#ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { + assert(!is_available(i), "just checking"); + } + assert(cur == max_length() || num_regions == 0 || is_available(cur), + err_msg("The region at the current position %u must be available or at the end of the heap.", cur)); +#endif + return num_regions; +} + +uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const { + return num_regions * worker_i / num_workers; +} + +void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const { + const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length); + + // Every worker will actually look at all regions, skipping over regions that + // are currently not committed. + // This also (potentially) iterates over regions newly allocated during GC. This + // is no problem except for some extra work. + for (uint count = 0; count < _allocated_heapregions_length; count++) { + const uint index = (start_index + count) % _allocated_heapregions_length; + assert(0 <= index && index < _allocated_heapregions_length, "sanity"); + // Skip over unavailable regions + if (!is_available(index)) { + continue; + } + HeapRegion* r = _regions.get_by_index(index); + // We'll ignore "continues humongous" regions (we'll process them + // when we come across their corresponding "start humongous" + // region) and regions already claimed. + if (r->claim_value() == claim_value || r->continuesHumongous()) { + continue; + } + // OK, try to claim it + if (!r->claimHeapRegion(claim_value)) { + continue; + } + // Success! + if (r->startsHumongous()) { + // If the region is "starts humongous" we'll iterate over its + // "continues humongous" first; in fact we'll do them + // first. The order is important. In one case, calling the + // closure on the "starts humongous" region might de-allocate + // and clear all its "continues humongous" regions and, as a + // result, we might end up processing them twice. 
So, we'll do + // them first (note: most closures will ignore them anyway) and + // then we'll do the "starts humongous" region. + for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) { + HeapRegion* chr = _regions.get_by_index(ch_index); + + assert(chr->continuesHumongous(), "Must be humongous region"); + assert(chr->humongous_start_region() == r, + err_msg("Must work on humongous continuation of the original start region " + PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr))); + assert(chr->claim_value() != claim_value, + "Must not have been claimed yet because claiming of humongous continuation first claims the start region"); + + bool claim_result = chr->claimHeapRegion(claim_value); + // We should always be able to claim it; no one else should + // be trying to claim this region. + guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object"); + + bool res2 = blk->doHeapRegion(chr); + if (res2) { + return; + } + + // Right now, this holds (i.e., no closure that actually + // does something with "continues humongous" regions + // clears them). We might have to weaken it in the future, + // but let's leave these two asserts here for extra safety. + assert(chr->continuesHumongous(), "should still be the case"); + assert(chr->humongous_start_region() == r, "sanity"); + } + } + + bool res = blk->doHeapRegion(r); + if (res) { + return; + } + } +} + +uint HeapRegionManager::shrink_by(uint num_regions_to_remove) { + assert(length() > 0, "the region sequence should not be empty"); + assert(length() <= _allocated_heapregions_length, "invariant"); + assert(_allocated_heapregions_length > 0, "we should have at least one region committed"); + assert(num_regions_to_remove < length(), "We should never remove all regions"); + + if (num_regions_to_remove == 0) { + return 0; + } + + uint removed = 0; + uint cur = _allocated_heapregions_length - 1; + uint idx_last_found = 0; + uint num_last_found = 0; + + while ((removed < num_regions_to_remove) && + (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) { + uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found); + + uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove); + + cur -= num_last_found; + removed += to_remove; + } + + verify_optional(); + + return removed; +} + +uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { + guarantee(start_idx < _allocated_heapregions_length, "checking"); + guarantee(res_idx != NULL, "checking"); + + uint num_regions_found = 0; + + jlong cur = start_idx; + while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) { + cur--; + } + if (cur == -1) { + return num_regions_found; + } + jlong old_cur = cur; + // cur indexes the first empty region + while (cur != -1 && is_available(cur) && at(cur)->is_empty()) { + cur--; + } + *res_idx = cur + 1; + num_regions_found = old_cur - cur; + +#ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) { + assert(at(i)->is_empty(), "just checking"); + } +#endif + return num_regions_found; +} + +void HeapRegionManager::verify() { + guarantee(length() <= _allocated_heapregions_length, + err_msg("invariant: _length: %u _allocated_length: %u", + length(), _allocated_heapregions_length)); + guarantee(_allocated_heapregions_length <= max_length(), + err_msg("invariant: _allocated_length: %u _max_length: %u", + _allocated_heapregions_length, max_length())); + + bool prev_committed = true; + 
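+  // (Editorial note: the loop below checks that committed regions tile the
+  // heap without gaps: within a committed run, each region must begin where
+  // its predecessor ended, with a "starts humongous" region contributing
+  // orig_end() rather than end().)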
+  uint num_committed = 0;
+  HeapWord* prev_end = heap_bottom();
+  for (uint i = 0; i < _allocated_heapregions_length; i++) {
+    if (!is_available(i)) {
+      prev_committed = false;
+      continue;
+    }
+    num_committed++;
+    HeapRegion* hr = _regions.get_by_index(i);
+    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
+    guarantee(!prev_committed || hr->bottom() == prev_end,
+              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
+    guarantee(hr->hrm_index() == i,
+              err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
+    // Asserts will fire if i is >= _length
+    HeapWord* addr = hr->bottom();
+    guarantee(addr_to_region(addr) == hr, "sanity");
+    // We cannot check whether the region is part of a particular set: at the time
+    // this method may be called, we have only completed allocation of the regions,
+    // but not yet put them into a region set.
+    prev_committed = true;
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
+  }
+  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
+    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
+  }
+
+  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+  _free_list.verify();
+}
+
+#ifndef PRODUCT
+void HeapRegionManager::verify_optional() {
+  verify();
+}
+#endif // PRODUCT
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
+
+class HeapRegion;
+class HeapRegionClosure;
+class FreeRegionList;
+
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+  virtual HeapRegion* default_value() const { return NULL; }
+};
+
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom()).
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _num_committed (returned by length()) is the number of currently
+//   committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+//   highest region index + 1 for which a HeapRegion instance has been
+//   allocated.
+// * max_length() returns the maximum number of regions the heap can have.
+//
+
+class HeapRegionManager: public CHeapObj<mtGC> {
+  friend class VMStructs;
+
+  G1HeapRegionTable _regions;
+
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  G1RegionToSpaceMapper* _bot_mapper;
+  G1RegionToSpaceMapper* _cardtable_mapper;
+  G1RegionToSpaceMapper* _card_counts_mapper;
+
+  FreeRegionList _free_list;
+
+  // Each bit in this bitmap indicates that the corresponding region is available
+  // for allocation.
+  BitMap _available_map;
+
+  // The number of regions committed in the heap.
+  uint _num_committed;
+
+  // Internal only. The highest heap region index + 1 for which we have
+  // allocated a HeapRegion instance.
+  uint _allocated_heapregions_length;
+
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
+  void make_regions_available(uint index, uint num_regions = 1);
+
+  // Pass down commit calls to the VirtualSpace.
+  void commit_regions(uint index, size_t num_regions = 1);
+  void uncommit_regions(uint index, size_t num_regions = 1);
+
+  // Notify other data structures about change in the heap layout.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+  // Calculate the starting region for each worker during parallel iteration so
+  // that they do not all start from the same region.
+  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
+
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
+  // If only_empty is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool only_empty);
+  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
+  // length of the sequence found. If this result is zero, no such sequence could be found,
+  // otherwise res_idx indicates the start index of these regions.
+  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
+  // Finds the next sequence of empty regions starting from start_idx, going backwards in
+  // the heap. Returns the length of the sequence found. If this value is zero, no
+  // sequence could be found, otherwise res_idx contains the start index of this range.
+  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+  // Allocate a new HeapRegion for the given index.
+ HeapRegion* new_heap_region(uint hrm_index); +#ifdef ASSERT +public: + bool is_free(HeapRegion* hr) const; +#endif + // Returns whether the given region is available for allocation. + bool is_available(uint region) const; + + public: + // Empty constructor, we'll initialize it with the initialize() method. + HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0), + _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL), + _allocated_heapregions_length(0), _available_map(), + _free_list("Free list", new MasterFreeRegionListMtSafeChecker()) + { } + + void initialize(G1RegionToSpaceMapper* heap_storage, + G1RegionToSpaceMapper* prev_bitmap, + G1RegionToSpaceMapper* next_bitmap, + G1RegionToSpaceMapper* bot, + G1RegionToSpaceMapper* cardtable, + G1RegionToSpaceMapper* card_counts); + + // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired + // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit + // the heap from the lowest address, this region (and its associated data + // structures) are available and we do not need to check further. + HeapRegion* get_dummy_region() { return new_heap_region(0); } + + // Return the HeapRegion at the given index. Assume that the index + // is valid. + inline HeapRegion* at(uint index) const; + + // If addr is within the committed space return its corresponding + // HeapRegion, otherwise return NULL. + inline HeapRegion* addr_to_region(HeapWord* addr) const; + + // Insert the given region into the free region list. + inline void insert_into_free_list(HeapRegion* hr); + + // Insert the given region list into the global free region list. + void insert_list_into_free_list(FreeRegionList* list) { + _free_list.add_ordered(list); + } + + HeapRegion* allocate_free_region(bool is_old) { + HeapRegion* hr = _free_list.remove_region(is_old); + + if (hr != NULL) { + assert(hr->next() == NULL, "Single region should not have next"); + assert(is_available(hr->hrm_index()), "Must be committed"); + } + return hr; + } + + inline void allocate_free_regions_starting_at(uint first, uint num_regions); + + // Remove all regions from the free list. + void remove_all_free_regions() { + _free_list.remove_all(); + } + + // Return the number of committed free regions in the heap. + uint num_free_regions() const { + return _free_list.length(); + } + + size_t total_capacity_bytes() const { + return num_free_regions() * HeapRegion::GrainBytes; + } + + // Return the number of available (uncommitted) regions. + uint available() const { return max_length() - length(); } + + // Return the number of regions that have been committed in the heap. + uint length() const { return _num_committed; } + + // Return the maximum number of regions in the heap. + uint max_length() const { return (uint)_regions.length(); } + + MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); } + + // Expand the sequence to reflect that the heap has grown. Either create new + // HeapRegions, or re-use existing ones. Returns the number of regions the + // sequence was expanded by. If a HeapRegion allocation fails, the resulting + // number of regions might be smaller than what's desired. + uint expand_by(uint num_regions); + + // Makes sure that the regions from start to start+num_regions-1 are available + // for allocation. Returns the number of regions that were committed to achieve + // this. + uint expand_at(uint start, uint num_regions); + + // Find a contiguous set of empty regions of length num. 
Returns the start index of + // that set, or G1_NO_HRM_INDEX. + uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); } + // Find a contiguous set of empty or unavailable regions of length num. Returns the + // start index of that set, or G1_NO_HRM_INDEX. + uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); } + + HeapRegion* next_region_in_heap(const HeapRegion* r) const; + + // Apply blk->doHeapRegion() on all committed regions in address order, + // terminating the iteration early if doHeapRegion() returns true. + void iterate(HeapRegionClosure* blk) const; + + void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const; + + // Uncommit up to num_regions_to_remove regions that are completely free. + // Return the actual number of uncommitted regions. + uint shrink_by(uint num_regions_to_remove); + + void verify(); + + // Do some sanity checking. + void verify_optional() PRODUCT_RETURN; +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP + +#include "gc_implementation/g1/heapRegion.hpp" +#include "gc_implementation/g1/heapRegionManager.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" + +inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const { + assert(addr < heap_end(), + err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end()))); + assert(addr >= heap_bottom(), + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom()))); + + HeapRegion* hr = _regions.get_by_address(addr); + return hr; +} + +inline HeapRegion* HeapRegionManager::at(uint index) const { + assert(is_available(index), "pre-condition"); + HeapRegion* hr = _regions.get_by_index(index); + assert(hr != NULL, "sanity"); + assert(hr->hrm_index() == index, "sanity"); + return hr; +} + +inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) { + _free_list.add_ordered(hr); +} + +inline void HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) { + _free_list.remove_starting_at(at(first), num_regions); +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,7 +27,7 @@ #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "memory/allocation.hpp" #include "memory/padded.inline.hpp" #include "memory/space.inline.hpp" @@ -169,7 +169,7 @@ // Mem size in bytes. size_t mem_size() const { - return sizeof(this) + _bm.size_in_words() * HeapWordSize; + return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize; } // Requires "from" to be in "hr()". 
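(Editor's aside on the PerRegionTable::mem_size() hunk above: inside a member
function, sizeof(this) is the size of the pointer, not of the object it points
to, so the old code under-reported the table's footprint. A minimal standalone
sketch of the pitfall, with hypothetical names, illustrative only and not part
of the patch:

  #include <cstdio>
  #include <cstddef>

  struct PerRegionTableLike {   // hypothetical stand-in, not the real class
    char bitmap_words[128];     // pretend payload
    std::size_t wrong_mem_size() const { return sizeof(this); }               // sizeof(PerRegionTableLike*)
    std::size_t right_mem_size() const { return sizeof(PerRegionTableLike); } // the whole object
  };

  int main() {
    PerRegionTableLike t;
    std::printf("wrong: %zu, right: %zu\n", t.wrong_mem_size(), t.right_mem_size());
    return 0;
  }

On an LP64 build wrong_mem_size() reports 8 bytes while the object occupies at
least 128, which is exactly the kind of under-counting the hunk corrects.)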
@@ -288,7 +288,7 @@ } _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries, - mtGC, 0, AllocFailStrategy::RETURN_NULL); + mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); if (_fine_grain_regions == NULL) { vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR, @@ -372,17 +372,17 @@ _max_regions, &_static_mem_size); - for (uint i = 0; i < n_par_rs; i++) { - for (uint j = 0; j < _max_regions; j++) { - set(i, j, InvalidCard); - } - } + invalidate(0, _max_regions); } -void FromCardCache::shrink(uint new_num_regions) { +void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) { + guarantee((size_t)start_idx + new_num_regions <= max_uintx, + err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT, + start_idx, new_num_regions)); for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { - assert(new_num_regions <= _max_regions, "Must be within max."); - for (uint j = new_num_regions; j < _max_regions; j++) { + uint end_idx = (start_idx + (uint)new_num_regions); + assert(end_idx <= _max_regions, "Must be within max."); + for (uint j = start_idx; j < end_idx; j++) { set(i, j, InvalidCard); } } @@ -406,12 +406,12 @@ } } -void OtherRegionsTable::init_from_card_cache(uint max_regions) { +void OtherRegionsTable::initialize(uint max_regions) { FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions); } -void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) { - FromCardCache::shrink(new_num_regions); +void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) { + FromCardCache::invalidate(start_idx, num_regions); } void OtherRegionsTable::print_from_card_cache() { @@ -419,7 +419,7 @@ } void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { - uint cur_hrs_ind = hr()->hrs_index(); + uint cur_hrm_ind = hr()->hrm_index(); if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", @@ -434,10 +434,10 @@ if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")", hr()->bottom(), from_card, - FromCardCache::at((uint)tid, cur_hrs_ind)); + FromCardCache::at((uint)tid, cur_hrm_ind)); } - if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) { + if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) { if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr(" from-card cache hit."); } @@ -447,10 +447,10 @@ // Note that this may be a continued H region. HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); - RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); + RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index(); // If the region is already coarsened, return. - if (_coarse_map.at(from_hrs_ind)) { + if (_coarse_map.at(from_hrm_ind)) { if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr(" coarse map hit."); } @@ -459,7 +459,7 @@ } // Otherwise find a per-region table to add it to. 
- size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; + size_t ind = from_hrm_ind & _mod_max_fine_entries_mask; PerRegionTable* prt = find_region_table(ind, from_hr); if (prt == NULL) { MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); @@ -474,7 +474,7 @@ assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion, "Must be in range."); if (G1HRRSUseSparseTable && - _sparse_table.add_card(from_hrs_ind, card_index)) { + _sparse_table.add_card(from_hrm_ind, card_index)) { if (G1RecordHRRSOops) { HeapRegionRemSet::record(hr(), from); if (G1TraceHeapRegionRememberedSet) { @@ -493,8 +493,8 @@ } else { if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr(" [tid %d] sparse table entry " - "overflow(f: %d, t: %d)", - tid, from_hrs_ind, cur_hrs_ind); + "overflow(f: %d, t: %u)", + tid, from_hrm_ind, cur_hrm_ind); } } @@ -515,7 +515,7 @@ if (G1HRRSUseSparseTable) { // Transfer from sparse to fine-grain. - SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind); + SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind); assert(sprt_entry != NULL, "There should have been an entry"); for (int i = 0; i < SparsePRTEntry::cards_num(); i++) { CardIdx_t c = sprt_entry->card(i); @@ -524,7 +524,7 @@ } } // Now we can delete the sparse entry. - bool res = _sparse_table.delete_entry(from_hrs_ind); + bool res = _sparse_table.delete_entry(from_hrm_ind); assert(res, "It should have been there."); } } @@ -606,9 +606,9 @@ guarantee(max != NULL, "Since _n_fine_entries > 0"); // Set the corresponding coarse bit. - size_t max_hrs_index = (size_t) max->hr()->hrs_index(); - if (!_coarse_map.at(max_hrs_index)) { - _coarse_map.at_put(max_hrs_index, true); + size_t max_hrm_index = (size_t) max->hr()->hrm_index(); + if (!_coarse_map.at(max_hrm_index)) { + _coarse_map.at_put(max_hrm_index, true); _n_coarse_entries++; if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " @@ -632,7 +632,7 @@ BitMap* region_bm, BitMap* card_bm) { // First eliminated garbage regions from the coarse map. if (G1RSScrubVerbose) { - gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index()); + gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index()); } assert(_coarse_map.size() == region_bm->size(), "Precondition"); @@ -655,9 +655,9 @@ // If the entire region is dead, eliminate. if (G1RSScrubVerbose) { gclog_or_tty->print_cr(" For other region %u:", - cur->hr()->hrs_index()); + cur->hr()->hrm_index()); } - if (!region_bm->at((size_t) cur->hr()->hrs_index())) { + if (!region_bm->at((size_t) cur->hr()->hrm_index())) { *prev = nxt; cur->set_collision_list_next(NULL); _n_fine_entries--; @@ -694,6 +694,9 @@ clear_fcc(); } +bool OtherRegionsTable::is_empty() const { + return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL; +} size_t OtherRegionsTable::occupied() const { size_t sum = occ_fine(); @@ -735,7 +738,7 @@ sum += (sizeof(PerRegionTable*) * _max_fine_entries); sum += (_coarse_map.size_in_words() * HeapWordSize); sum += (_sparse_table.mem_size()); - sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. + sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above. 
return sum; } @@ -748,7 +751,7 @@ } void OtherRegionsTable::clear_fcc() { - FromCardCache::clear(hr()->hrs_index()); + FromCardCache::clear(hr()->hrm_index()); } void OtherRegionsTable::clear() { @@ -770,30 +773,6 @@ clear_fcc(); } -void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - size_t hrs_ind = (size_t) from_hr->hrs_index(); - size_t ind = hrs_ind & _mod_max_fine_entries_mask; - if (del_single_region_table(ind, from_hr)) { - assert(!_coarse_map.at(hrs_ind), "Inv"); - } else { - _coarse_map.par_at_put(hrs_ind, 0); - } - // Check to see if any of the fcc entries come from here. - uint hr_ind = hr()->hrs_index(); - for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) { - int fcc_ent = FromCardCache::at(tid, hr_ind); - if (fcc_ent != FromCardCache::InvalidCard) { - HeapWord* card_addr = (HeapWord*) - (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift); - if (hr()->is_in_reserved(card_addr)) { - // Clear the from card cache. - FromCardCache::set(tid, hr_ind, FromCardCache::InvalidCard); - } - } - } -} - bool OtherRegionsTable::del_single_region_table(size_t ind, HeapRegion* hr) { assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); @@ -823,8 +802,7 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const { HeapRegion* hr = _g1h->heap_region_containing_raw(from); - if (hr == NULL) return false; - RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index(); + RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index(); // Is this region in the coarse map? if (_coarse_map.at(hr_ind)) return true; @@ -861,8 +839,8 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr) : _bosa(bosa), - _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true), - _code_roots(), _other_regions(hr, &_m) { + _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true), + _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) { reset_for_par_iteration(); } @@ -945,123 +923,61 @@ } // Code roots support +// +// The code root set is protected by two separate locking schemes +// When at safepoint the per-hrrs lock must be held during modifications +// except when doing a full gc. +// When not at safepoint the CodeCache_lock must be held during modifications. +// When concurrent readers access the contains() function +// (during the evacuation phase) no removals are allowed. void HeapRegionRemSet::add_strong_code_root(nmethod* nm) { assert(nm != NULL, "sanity"); + // Optimistic unlocked contains-check + if (!_code_roots.contains(nm)) { + MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag); + add_strong_code_root_locked(nm); + } +} + +void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) { + assert(nm != NULL, "sanity"); _code_roots.add(nm); } void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) { assert(nm != NULL, "sanity"); + assert_locked_or_safepoint(CodeCache_lock); + + MutexLockerEx ml(CodeCache_lock->owned_by_self() ? 
NULL : &_m, Mutex::_no_safepoint_check_flag); _code_roots.remove(nm); + // Check that there were no duplicates guarantee(!_code_roots.contains(nm), "duplicate entry found"); } -class NMethodMigrationOopClosure : public OopClosure { - G1CollectedHeap* _g1h; - HeapRegion* _from; - nmethod* _nm; - - uint _num_self_forwarded; - - template void do_oop_work(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_from->is_in(obj)) { - // Reference still points into the source region. - // Since roots are immediately evacuated this means that - // we must have self forwarded the object - assert(obj->is_forwarded(), - err_msg("code roots should be immediately evacuated. " - "Ref: "PTR_FORMAT", " - "Obj: "PTR_FORMAT", " - "Region: "HR_FORMAT, - p, (void*) obj, HR_FORMAT_PARAMS(_from))); - assert(obj->forwardee() == obj, - err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj)); - - // The object has been self forwarded. - // Note, if we're during an initial mark pause, there is - // no need to explicitly mark object. It will be marked - // during the regular evacuation failure handling code. - _num_self_forwarded++; - } else { - // The reference points into a promotion or to-space region - HeapRegion* to = _g1h->heap_region_containing(obj); - to->rem_set()->add_strong_code_root(_nm); - } - } - } - -public: - NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm): - _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {} - - void do_oop(narrowOop* p) { do_oop_work(p); } - void do_oop(oop* p) { do_oop_work(p); } - - uint retain() { return _num_self_forwarded > 0; } -}; - -void HeapRegionRemSet::migrate_strong_code_roots() { - assert(hr()->in_collection_set(), "only collection set regions"); - assert(!hr()->isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to the collection set", - HR_FORMAT_PARAMS(hr()))); - - ResourceMark rm; - - // List of code blobs to retain for this region - GrowableArray to_be_retained(10); - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - while (!_code_roots.is_empty()) { - nmethod *nm = _code_roots.pop(); - if (nm != NULL) { - NMethodMigrationOopClosure oop_cl(g1h, hr(), nm); - nm->oops_do(&oop_cl); - if (oop_cl.retain()) { - to_be_retained.push(nm); - } - } - } - - // Now push any code roots we need to retain - assert(to_be_retained.is_empty() || hr()->evacuation_failed(), - "Retained nmethod list must be empty or " - "evacuation of this region failed"); - - while (to_be_retained.is_nonempty()) { - nmethod* nm = to_be_retained.pop(); - assert(nm != NULL, "sanity"); - add_strong_code_root(nm); - } -} - void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const { _code_roots.nmethods_do(blk); } +void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) { + _code_roots.clean(hr); +} + size_t HeapRegionRemSet::strong_code_roots_mem_size() { return _code_roots.mem_size(); } -//-------------------- Iteration -------------------- - HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) : _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), _coarse_map(&hrrs->_other_regions._coarse_map), - _fine_grain_regions(hrrs->_other_regions._fine_grain_regions), _bosa(hrrs->bosa()), _is(Sparse), // Set these values so that we increment to the first region. 
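+  // (Editorial note: starting the coarse cursor at region -1 and card
+  // CardsPerRegion-1 appears to make the first advance land on card 0 of
+  // region 0; switch_to_prt() uses the same minus-one idea for the fine
+  // iterator by seeding _cur_card_in_prt with (size_t)-1.)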
   _coarse_cur_region_index(-1),
+  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
-  _cur_region_cur_card(0),
-  _fine_array_index(-1),
+  _cur_card_in_prt(HeapRegion::CardsPerRegion),
   _fine_cur_prt(NULL),
   _n_yielded_coarse(0),
   _n_yielded_fine(0),
@@ -1093,58 +1009,59 @@
   return true;
 }
 
-void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
-  // Otherwise, find the next bucket list in the array.
-  _fine_array_index++;
-  while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
-    _fine_cur_prt = _fine_grain_regions[_fine_array_index];
-    if (_fine_cur_prt != NULL) return;
-    else _fine_array_index++;
-  }
-  assert(_fine_cur_prt == NULL, "Loop post");
-}
-
 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
   if (fine_has_next()) {
-    _cur_region_cur_card =
-      _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
+    _cur_card_in_prt =
+      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
   }
-  while (!fine_has_next()) {
-    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
-      _cur_region_cur_card = 0;
-      _fine_cur_prt = _fine_cur_prt->collision_list_next();
+  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
+    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
+    // the remembered set.
+    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
+      return false;
     }
-    if (_fine_cur_prt == NULL) {
-      fine_find_next_non_null_prt();
-      if (_fine_cur_prt == NULL) return false;
-    }
-    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
-           "inv.");
-    HeapWord* r_bot =
-      _fine_cur_prt->hr()->bottom();
-    _cur_region_card_offset = _bosa->index_for(r_bot);
-    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
+    PerRegionTable* next_prt = _fine_cur_prt->next();
+    switch_to_prt(next_prt);
+    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
   }
-  assert(fine_has_next(), "Or else we exited the loop via the return.");
-  card_index = _cur_region_card_offset + _cur_region_cur_card;
+
+  card_index = _cur_region_card_offset + _cur_card_in_prt;
+  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
+            err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
   return true;
 }
 
 bool HeapRegionRemSetIterator::fine_has_next() {
-  return
-    _fine_cur_prt != NULL &&
-    _cur_region_cur_card < HeapRegion::CardsPerRegion;
+  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
+}
+
+void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
+  assert(prt != NULL, "Cannot switch to NULL prt");
+  _fine_cur_prt = prt;
+
+  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
+  _cur_region_card_offset = _bosa->index_for(r_bot);
+
+  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
+  // To avoid special-casing this start case, and to not miss the first bitmap
+  // entry, initialize _cur_card_in_prt with -1 instead of 0.
+ _cur_card_in_prt = (size_t)-1; } bool HeapRegionRemSetIterator::has_next(size_t& card_index) { switch (_is) { - case Sparse: + case Sparse: { if (_sparse_iter.has_next(card_index)) { _n_yielded_sparse++; return true; } // Otherwise, deliberate fall-through _is = Fine; + PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts; + if (initial_fine_prt != NULL) { + switch_to_prt(_hrrs->_other_regions._first_all_fine_prts); + } + } case Fine: if (fine_has_next(card_index)) { _n_yielded_fine++; @@ -1276,6 +1193,11 @@ #ifndef PRODUCT void PerRegionTable::test_fl_mem_size() { PerRegionTable* dummy = alloc(NULL); + + size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize; + assert(dummy->mem_size() > min_prt_size, + err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. " + "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size)); free(dummy); guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size"); // try to reset the state --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -84,7 +84,7 @@ static void initialize(uint n_par_rs, uint max_num_regions); - static void shrink(uint new_num_regions); + static void invalidate(uint start_idx, size_t num_regions); static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN; @@ -185,6 +185,9 @@ // objects. void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); + // Returns whether this remembered set (and all sub-sets) contain no entries. + bool is_empty() const; + size_t occupied() const; size_t occ_fine() const; size_t occ_coarse() const; @@ -206,18 +209,15 @@ // Specifically clear the from_card_cache. void clear_fcc(); - // "from_hr" is being cleared; remove any entries from it. - void clear_incoming_entry(HeapRegion* from_hr); - void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task); // Declare the heap size (in # of regions) to the OtherRegionsTable. // (Uses it to initialize from_card_cache). - static void init_from_card_cache(uint max_regions); + static void initialize(uint max_regions); - // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. - // Make sure any entries for higher regions are invalid. - static void shrink_from_card_cache(uint new_num_regions); + // Declares that regions between start_idx <= i < start_idx + num_regions are + // not in use. Make sure that any entries for these regions are invalid. + static void invalidate(uint start_idx, size_t num_regions); static void print_from_card_cache(); }; @@ -272,6 +272,10 @@ return _other_regions.hr(); } + bool is_empty() const { + return (strong_code_roots_list_length() == 0) && _other_regions.is_empty(); + } + size_t occupied() { MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); return occupied_locked(); @@ -342,7 +346,7 @@ return _other_regions.mem_size() // This correction is necessary because the above includes the second // part. - + (sizeof(this) - sizeof(OtherRegionsTable)) + + (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable)) + strong_code_roots_mem_size(); } @@ -355,7 +359,7 @@ // Returns the memory occupancy of all free_list data structures associated // with remembered sets. 
static size_t fl_mem_size() { - return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size(); + return OtherRegionsTable::fl_mem_size(); } bool contains_reference(OopOrNarrowOopStar from) const { @@ -365,20 +369,17 @@ // Routines for managing the list of code roots that point into // the heap region that owns this RSet. void add_strong_code_root(nmethod* nm); + void add_strong_code_root_locked(nmethod* nm); void remove_strong_code_root(nmethod* nm); - // During a collection, migrate the successfully evacuated strong - // code roots that referenced into the region that owns this RSet - // to the RSets of the new regions that they now point into. - // Unsuccessfully evacuated code roots are not migrated. - void migrate_strong_code_roots(); - // Applies blk->do_code_blob() to each of the entries in // the strong code roots list void strong_code_roots_do(CodeBlobClosure* blk) const; + void clean_strong_code_roots(HeapRegion* hr); + // Returns the number of elements in the strong code roots list - size_t strong_code_roots_list_length() { + size_t strong_code_roots_list_length() const { return _code_roots.length(); } @@ -400,13 +401,11 @@ // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // (Uses it to initialize from_card_cache). static void init_heap(uint max_regions) { - G1CodeRootSet::initialize(); - OtherRegionsTable::init_from_card_cache(max_regions); + OtherRegionsTable::initialize(max_regions); } - // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. - static void shrink_heap(uint new_n_regs) { - OtherRegionsTable::shrink_from_card_cache(new_n_regs); + static void invalidate(uint start_idx, uint num_regions) { + OtherRegionsTable::invalidate(start_idx, num_regions); } #ifndef PRODUCT @@ -433,26 +432,24 @@ }; class HeapRegionRemSetIterator : public StackObj { - - // The region RSet over which we're iterating. + private: + // The region RSet over which we are iterating. HeapRegionRemSet* _hrrs; // Local caching of HRRS fields. const BitMap* _coarse_map; - PerRegionTable** _fine_grain_regions; G1BlockOffsetSharedArray* _bosa; G1CollectedHeap* _g1h; - // The number yielded since initialization. + // The number of cards yielded since initialization. size_t _n_yielded_fine; size_t _n_yielded_coarse; size_t _n_yielded_sparse; - // Indicates what granularity of table that we're currently iterating over. + // Indicates what granularity of table that we are currently iterating over. // We start iterating over the sparse table, progress to the fine grain // table, and then finish with the coarse table. - // See HeapRegionRemSetIterator::has_next(). enum IterState { Sparse, Fine, @@ -460,38 +457,30 @@ }; IterState _is; - // In both kinds of iteration, heap offset of first card of current - // region. + // For both Coarse and Fine remembered set iteration this contains the + // first card number of the heap region we currently iterate over. size_t _cur_region_card_offset; - // Card offset within cur region. - size_t _cur_region_cur_card; - // Coarse table iteration fields: - - // Current region index; + // Current region index for the Coarse remembered set iteration. int _coarse_cur_region_index; size_t _coarse_cur_region_cur_card; bool coarse_has_next(size_t& card_index); - // Fine table iteration fields: + // The PRT we are currently iterating over. + PerRegionTable* _fine_cur_prt; + // Card offset within the current PRT. + size_t _cur_card_in_prt; - // Index of bucket-list we're working on. 
- int _fine_array_index; - - // Per Region Table we're doing within current bucket list. - PerRegionTable* _fine_cur_prt; - - /* SparsePRT::*/ SparsePRTIter _sparse_iter; - - void fine_find_next_non_null_prt(); - + // Update internal variables when switching to the given PRT. + void switch_to_prt(PerRegionTable* prt); bool fine_has_next(); bool fine_has_next(size_t& card_index); -public: - // We require an iterator to be initialized before use, so the - // constructor does little. + // The Sparse remembered set iterator. + SparsePRTIter _sparse_iter; + + public: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs); // If there remains one or more cards to be yielded, returns true and --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "gc_implementation/g1/heapRegion.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" -#include "gc_implementation/g1/heapRegionSet.hpp" -#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" -#include "memory/allocation.hpp" - -// Private - -uint HeapRegionSeq::find_contiguous_from(uint from, uint num) { - uint len = length(); - assert(num > 1, "use this only for sequences of length 2 or greater"); - assert(from <= len, - err_msg("from: %u should be valid and <= than %u", from, len)); - - uint curr = from; - uint first = G1_NULL_HRS_INDEX; - uint num_so_far = 0; - while (curr < len && num_so_far < num) { - if (at(curr)->is_empty()) { - if (first == G1_NULL_HRS_INDEX) { - first = curr; - num_so_far = 1; - } else { - num_so_far += 1; - } - } else { - first = G1_NULL_HRS_INDEX; - num_so_far = 0; - } - curr += 1; - } - assert(num_so_far <= num, "post-condition"); - if (num_so_far == num) { - // we found enough space for the humongous object - assert(from <= first && first < len, "post-condition"); - assert(first < curr && (curr - first) == num, "post-condition"); - for (uint i = first; i < first + num; ++i) { - assert(at(i)->is_empty(), "post-condition"); - } - return first; - } else { - // we failed to find enough space for the humongous object - return G1_NULL_HRS_INDEX; - } -} - -// Public - -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) { - assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0, - "bottom should be heap region aligned"); - assert((uintptr_t) end % HeapRegion::GrainBytes == 0, - "end should be heap region aligned"); - - _next_search_index = 0; - _allocated_length = 0; - - _regions.initialize(bottom, end, HeapRegion::GrainBytes); -} - -MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, - HeapWord* new_end, - FreeRegionList* list) { - assert(old_end < new_end, "don't call it otherwise"); - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - HeapWord* next_bottom = old_end; - assert(heap_bottom() <= next_bottom, "invariant"); - while (next_bottom < new_end) { - assert(next_bottom < heap_end(), "invariant"); - uint index = length(); - - assert(index < max_length(), "otherwise we cannot expand further"); - if (index == 0) { - // We have not allocated any regions so far - assert(next_bottom == heap_bottom(), "invariant"); - } else { - // next_bottom should match the end of the last/previous region - assert(next_bottom == at(index - 1)->end(), "invariant"); - } - - if (index == _allocated_length) { - // We have to allocate a new HeapRegion. - HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom); - if (new_hr == NULL) { - // allocation failed, we bail out and return what we have done so far - return MemRegion(old_end, next_bottom); - } - assert(_regions.get_by_index(index) == NULL, "invariant"); - _regions.set_by_index(index, new_hr); - increment_allocated_length(); - } - // Have to increment the length first, otherwise we will get an - // assert failure at(index) below. 
- increment_length(); - HeapRegion* hr = at(index); - list->add_as_tail(hr); - - next_bottom = hr->end(); - } - assert(next_bottom == new_end, "post-condition"); - return MemRegion(old_end, next_bottom); -} - -uint HeapRegionSeq::free_suffix() { - uint res = 0; - uint index = length(); - while (index > 0) { - index -= 1; - if (!at(index)->is_empty()) { - break; - } - res += 1; - } - return res; -} - -uint HeapRegionSeq::find_contiguous(uint num) { - assert(num > 1, "use this only for sequences of length 2 or greater"); - assert(_next_search_index <= length(), - err_msg("_next_search_index: %u should be valid and <= than %u", - _next_search_index, length())); - - uint start = _next_search_index; - uint res = find_contiguous_from(start, num); - if (res == G1_NULL_HRS_INDEX && start > 0) { - // Try starting from the beginning. If _next_search_index was 0, - // no point in doing this again. - res = find_contiguous_from(0, num); - } - if (res != G1_NULL_HRS_INDEX) { - assert(res < length(), err_msg("res: %u should be valid", res)); - _next_search_index = res + num; - assert(_next_search_index <= length(), - err_msg("_next_search_index: %u should be valid and <= than %u", - _next_search_index, length())); - } - return res; -} - -void HeapRegionSeq::iterate(HeapRegionClosure* blk) const { - iterate_from((HeapRegion*) NULL, blk); -} - -void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { - uint hr_index = 0; - if (hr != NULL) { - hr_index = hr->hrs_index(); - } - - uint len = length(); - for (uint i = hr_index; i < len; i += 1) { - bool res = blk->doHeapRegion(at(i)); - if (res) { - blk->incomplete(); - return; - } - } - for (uint i = 0; i < hr_index; i += 1) { - bool res = blk->doHeapRegion(at(i)); - if (res) { - blk->incomplete(); - return; - } - } -} - -uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) { - // Reset this in case it's currently pointing into the regions that - // we just removed. 
- _next_search_index = 0; - - assert(length() > 0, "the region sequence should not be empty"); - assert(length() <= _allocated_length, "invariant"); - assert(_allocated_length > 0, "we should have at least one region committed"); - assert(num_regions_to_remove < length(), "We should never remove all regions"); - - uint i = 0; - for (; i < num_regions_to_remove; i++) { - HeapRegion* cur = at(length() - 1); - - if (!cur->is_empty()) { - // We have to give up if the region can not be moved - break; - } - assert(!cur->isHumongous(), "Humongous regions should not be empty"); - - decrement_length(); - } - return i; -} - -#ifndef PRODUCT -void HeapRegionSeq::verify_optional() { - guarantee(length() <= _allocated_length, - err_msg("invariant: _length: %u _allocated_length: %u", - length(), _allocated_length)); - guarantee(_allocated_length <= max_length(), - err_msg("invariant: _allocated_length: %u _max_length: %u", - _allocated_length, max_length())); - guarantee(_next_search_index <= length(), - err_msg("invariant: _next_search_index: %u _length: %u", - _next_search_index, length())); - - HeapWord* prev_end = heap_bottom(); - for (uint i = 0; i < _allocated_length; i += 1) { - HeapRegion* hr = _regions.get_by_index(i); - guarantee(hr != NULL, err_msg("invariant: i: %u", i)); - guarantee(hr->bottom() == prev_end, - err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, - i, HR_FORMAT_PARAMS(hr), p2i(prev_end))); - guarantee(hr->hrs_index() == i, - err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index())); - if (i < length()) { - // Asserts will fire if i is >= _length - HeapWord* addr = hr->bottom(); - guarantee(addr_to_region(addr) == hr, "sanity"); - guarantee(addr_to_region_unsafe(addr) == hr, "sanity"); - } else { - guarantee(hr->is_empty(), "sanity"); - guarantee(!hr->isHumongous(), "sanity"); - // using assert instead of guarantee here since containing_set() - // is only available in non-product builds. - assert(hr->containing_set() == NULL, "sanity"); - } - if (hr->startsHumongous()) { - prev_end = hr->orig_end(); - } else { - prev_end = hr->end(); - } - } - for (uint i = _allocated_length; i < max_length(); i += 1) { - guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i)); - } -} -#endif // PRODUCT --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP -#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP - -#include "gc_implementation/g1/g1BiasedArray.hpp" - -class HeapRegion; -class HeapRegionClosure; -class FreeRegionList; - -class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> { - protected: - virtual HeapRegion* default_value() const { return NULL; } -}; - -// This class keeps track of the region metadata (i.e., HeapRegion -// instances). They are kept in the _regions array in address -// order. A region's index in the array corresponds to its index in -// the heap (i.e., 0 is the region at the bottom of the heap, 1 is -// the one after it, etc.). Two regions that are consecutive in the -// array should also be adjacent in the address space (i.e., -// region(i).end() == region(i+1).bottom()). -// -// We create a HeapRegion when we commit the region's address space -// for the first time. When we uncommit the address space of a -// region we retain the HeapRegion to be able to re-use it in the -// future (in case we recommit it). -// -// We keep track of three lengths: -// -// * _committed_length (returned by length()) is the number of currently -// committed regions. -// * _allocated_length (not exposed outside this class) is the -// number of regions for which we have HeapRegions. -// * max_length() returns the maximum number of regions the heap can have. -// -// and maintain that: _committed_length <= _allocated_length <= max_length() - -class HeapRegionSeq: public CHeapObj<mtGC> { - friend class VMStructs; - - G1HeapRegionTable _regions; - - // The number of regions committed in the heap. - uint _committed_length; - - // A hint for which index to start searching from for humongous - // allocations. - uint _next_search_index; - - // The number of regions for which we have allocated HeapRegions. - uint _allocated_length; - - // Find a contiguous set of empty regions of length num, starting - // from the given index. - uint find_contiguous_from(uint from, uint num); - - void increment_allocated_length() { - assert(_allocated_length < max_length(), "pre-condition"); - _allocated_length++; - } - - void increment_length() { - assert(length() < max_length(), "pre-condition"); - _committed_length++; - } - - void decrement_length() { - assert(length() > 0, "pre-condition"); - _committed_length--; - } - - HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); } - HeapWord* heap_end() const { return _regions.end_address_mapped(); } - - public: - // Empty constructor, we'll initialize it with the initialize() method. - HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { } - - void initialize(HeapWord* bottom, HeapWord* end); - - // Return the HeapRegion at the given index. Assume that the index - // is valid. - inline HeapRegion* at(uint index) const; - - // If addr is within the committed space return its corresponding - // HeapRegion, otherwise return NULL. - inline HeapRegion* addr_to_region(HeapWord* addr) const; - - // Return the HeapRegion that corresponds to the given - // address. Assume the address is valid. - inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const; - - // Return the number of regions that have been committed in the heap. - uint length() const { return _committed_length; } - - // Return the maximum number of regions in the heap. - uint max_length() const { return (uint)_regions.length(); } - - // Expand the sequence to reflect that the heap has grown from - // old_end to new_end.
Either create new HeapRegions, or re-use - // existing ones, and return them in the given list. Returns the - // memory region that covers the newly-created regions. If a - // HeapRegion allocation fails, the result memory region might be - // smaller than the desired one. - MemRegion expand_by(HeapWord* old_end, HeapWord* new_end, - FreeRegionList* list); - - // Return the number of contiguous regions at the end of the sequence - // that are available for allocation. - uint free_suffix(); - - // Find a contiguous set of empty regions of length num and return - // the index of the first region or G1_NULL_HRS_INDEX if the - // search was unsuccessful. - uint find_contiguous(uint num); - - // Apply blk->doHeapRegion() on all committed regions in address order, - // terminating the iteration early if doHeapRegion() returns true. - void iterate(HeapRegionClosure* blk) const; - - // As above, but start the iteration from hr and loop around. If hr - // is NULL, we start from the first region in the heap. - void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const; - - // Tag as uncommitted as many regions that are completely free as - // possible, up to num_regions_to_remove, from the suffix of the committed - // sequence. Return the actual number of removed regions. - uint shrink_by(uint num_regions_to_remove); - - // Do some sanity checking. - void verify_optional() PRODUCT_RETURN; -}; - -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP -#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP - -#include "gc_implementation/g1/heapRegion.hpp" -#include "gc_implementation/g1/heapRegionSeq.hpp" - -inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const { - HeapRegion* hr = _regions.get_by_address(addr); - assert(hr != NULL, "invariant"); - return hr; -} - -inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const { - if (addr != NULL && addr < heap_end()) { - assert(addr >= heap_bottom(), - err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom()))); - return addr_to_region_unsafe(addr); - } - return NULL; -} - -inline HeapRegion* HeapRegionSeq::at(uint index) const { - assert(index < length(), "pre-condition"); - HeapRegion* hr = _regions.get_by_index(index); - assert(hr != NULL, "sanity"); - assert(hr->hrs_index() == index, "sanity"); - return hr; -} - -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSet.inline.hpp" @@ -38,11 +39,13 @@ #ifndef PRODUCT void HeapRegionSetBase::verify_region(HeapRegion* hr) { - assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index())); - assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions - assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name())); - assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name())); - assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index())); + assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index())); + assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions + assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name())); + assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name())); + assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name())); + assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name())); + assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index())); } #endif @@ -67,7 +70,7 @@ // Do the basic verification first before we do the checks over the regions. 
HeapRegionSetBase::verify(); - _verify_in_progress = true; + _verify_in_progress = true; } void HeapRegionSetBase::verify_end() { @@ -84,16 +87,16 @@ out->print_cr("Set: %s ("PTR_FORMAT")", name(), this); out->print_cr(" Region Assumptions"); out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous())); - out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty())); + out->print_cr(" free : %s", BOOL_TO_STR(regions_free())); out->print_cr(" Attributes"); out->print_cr(" length : %14u", length()); out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes", total_capacity_bytes()); } -HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker) +HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker) : _name(name), _verify_in_progress(false), - _is_humongous(humongous), _is_empty(empty), _mt_safety_checker(mt_safety_checker), + _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker), _count() { } @@ -103,62 +106,7 @@ } void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) { - msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail()); -} - -void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) { - check_mt_safety(); - from_list->check_mt_safety(); - - verify_optional(); - from_list->verify_optional(); - - if (from_list->is_empty()) { - return; - } - -#ifdef ASSERT - FreeRegionListIterator iter(from_list); - while (iter.more_available()) { - HeapRegion* hr = iter.get_next(); - // In set_containing_set() we check that we either set the value - // from NULL to non-NULL or vice versa to catch bugs. So, we have - // to NULL it first before setting it to the value. - hr->set_containing_set(NULL); - hr->set_containing_set(this); - } -#endif // ASSERT - - if (_head == NULL) { - assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant")); - _head = from_list->_head; - _tail = from_list->_tail; - } else { - assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant")); - if (as_head) { - from_list->_tail->set_next(_head); - _head->set_prev(from_list->_tail); - _head = from_list->_head; - } else { - _tail->set_next(from_list->_head); - from_list->_head->set_prev(_tail); - _tail = from_list->_tail; - } - } - - _count.increment(from_list->length(), from_list->total_capacity_bytes()); - from_list->clear(); - - verify_optional(); - from_list->verify_optional(); -} - -void FreeRegionList::add_as_head(FreeRegionList* from_list) { - add_as_head_or_tail(from_list, true /* as_head */); -} - -void FreeRegionList::add_as_tail(FreeRegionList* from_list) { - add_as_head_or_tail(from_list, false /* as_head */); + msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail); } void FreeRegionList::remove_all() { @@ -191,11 +139,6 @@ return; } - if (is_empty()) { - add_as_head(from_list); - return; - } - #ifdef ASSERT FreeRegionListIterator iter(from_list); while (iter.more_available()) { @@ -208,39 +151,45 @@ } #endif // ASSERT - HeapRegion* curr_to = _head; - HeapRegion* curr_from = from_list->_head; + if (is_empty()) { + assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant")); + _head = from_list->_head; + _tail = from_list->_tail; + } else { + HeapRegion* curr_to = _head; + HeapRegion* curr_from = from_list->_head; - while (curr_from != NULL) { - while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) { - curr_to = curr_to->next(); + while (curr_from != NULL) { + while 
(curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) { + curr_to = curr_to->next(); + } + + if (curr_to == NULL) { + // The rest of the from list should be added as tail + _tail->set_next(curr_from); + curr_from->set_prev(_tail); + curr_from = NULL; + } else { + HeapRegion* next_from = curr_from->next(); + + curr_from->set_next(curr_to); + curr_from->set_prev(curr_to->prev()); + if (curr_to->prev() == NULL) { + _head = curr_from; + } else { + curr_to->prev()->set_next(curr_from); + } + curr_to->set_prev(curr_from); + + curr_from = next_from; + } } - if (curr_to == NULL) { - // The rest of the from list should be added as tail - _tail->set_next(curr_from); - curr_from->set_prev(_tail); - curr_from = NULL; - } else { - HeapRegion* next_from = curr_from->next(); - - curr_from->set_next(curr_to); - curr_from->set_prev(curr_to->prev()); - if (curr_to->prev() == NULL) { - _head = curr_from; - } else { - curr_to->prev()->set_next(curr_from); - } - curr_to->set_prev(curr_from); - - curr_from = next_from; + if (_tail->hrm_index() < from_list->_tail->hrm_index()) { + _tail = from_list->_tail; } } - if (_tail->hrs_index() < from_list->_tail->hrs_index()) { - _tail = from_list->_tail; - } - _count.increment(from_list->length(), from_list->total_capacity_bytes()); from_list->clear(); @@ -248,68 +197,59 @@ from_list->verify_optional(); } -void FreeRegionList::remove_all_pending(uint target_count) { +void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) { check_mt_safety(); - assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); + assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition")); assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); verify_optional(); DEBUG_ONLY(uint old_length = length();) - HeapRegion* curr = _head; + HeapRegion* curr = first; uint count = 0; - while (curr != NULL) { + while (count < num_regions) { verify_region(curr); HeapRegion* next = curr->next(); HeapRegion* prev = curr->prev(); - if (curr->pending_removal()) { - assert(count < target_count, - hrs_err_msg("[%s] should not come across more regions " - "pending for removal than target_count: %u", - name(), target_count)); + assert(count < num_regions, + hrs_err_msg("[%s] should not come across more regions " + "pending for removal than num_regions: %u", + name(), num_regions)); - if (prev == NULL) { - assert(_head == curr, hrs_ext_msg(this, "invariant")); - _head = next; - } else { - assert(_head != curr, hrs_ext_msg(this, "invariant")); - prev->set_next(next); - } - if (next == NULL) { - assert(_tail == curr, hrs_ext_msg(this, "invariant")); - _tail = prev; - } else { - assert(_tail != curr, hrs_ext_msg(this, "invariant")); - next->set_prev(prev); - } - if (_last == curr) { - _last = NULL; - } + if (prev == NULL) { + assert(_head == curr, hrs_ext_msg(this, "invariant")); + _head = next; + } else { + assert(_head != curr, hrs_ext_msg(this, "invariant")); + prev->set_next(next); + } + if (next == NULL) { + assert(_tail == curr, hrs_ext_msg(this, "invariant")); + _tail = prev; + } else { + assert(_tail != curr, hrs_ext_msg(this, "invariant")); + next->set_prev(prev); + } + if (_last == curr) { + _last = NULL; + } - curr->set_next(NULL); - curr->set_prev(NULL); - remove(curr); - curr->set_pending_removal(false); + curr->set_next(NULL); + curr->set_prev(NULL); + remove(curr); - count += 1; - - // If we have come across the target number of regions we can - // just bail out.
However, for debugging purposes, we can just - // carry on iterating to make sure there are not more regions - // tagged with pending removal. - DEBUG_ONLY(if (count == target_count) break;) - } + count++; curr = next; } - assert(count == target_count, - hrs_err_msg("[%s] count: %u should be == target_count: %u", - name(), count, target_count)); - assert(length() + target_count == old_length, + assert(count == num_regions, + hrs_err_msg("[%s] count: %u should be == num_regions: %u", + name(), count, num_regions)); + assert(length() + num_regions == old_length, hrs_err_msg("[%s] new length should be consistent " - "new length: %u old length: %u target_count: %u", - name(), length(), old_length, target_count)); + "new length: %u old length: %u num_regions: %u", + name(), length(), old_length, num_regions)); verify_optional(); } @@ -348,10 +288,12 @@ hr->print_on(out); } } + + out->cr(); } void FreeRegionList::verify_list() { - HeapRegion* curr = head(); + HeapRegion* curr = _head; HeapRegion* prev1 = NULL; HeapRegion* prev0 = NULL; uint count = 0; @@ -369,8 +311,8 @@ if (curr->next() != NULL) { guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up"); } - guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted"); - last_index = curr->hrs_index(); + guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted"); + last_index = curr->hrm_index(); capacity += curr->capacity(); @@ -379,7 +321,7 @@ curr = curr->next(); } - guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index())); + guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index())); guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next"); guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count)); guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, @@ -463,3 +405,41 @@ "master humongous set MT safety protocol outside a safepoint"); } } + +void FreeRegionList_test() { + FreeRegionList l("test"); + + const uint num_regions_in_test = 5; + // Create a fake heap. It does not need to be valid, as the HeapRegion constructor + // does not access it. + MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords); + // Allocate a fake BOT because the HeapRegion constructor initializes + // the BOT. 
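+ // (Descriptive aside: a BOT, or block offset table, is the side structure
+ // that lets G1 map an arbitrary address within a region back to the start
+ // of the object covering it. Since the HeapRegion constructor wires the
+ // new region into that table, the test has to commit real backing storage
+ // for it even though the heap itself is fake.)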
+ size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size()); + HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC); + ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size())); + G1RegionToSpaceMapper* bot_storage = + G1RegionToSpaceMapper::create_mapper(bot_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + G1BlockOffsetSharedArray oa(heap, bot_storage); + bot_storage->commit_regions(0, num_regions_in_test); + HeapRegion hr0(0, &oa, heap); + HeapRegion hr1(1, &oa, heap); + HeapRegion hr2(2, &oa, heap); + HeapRegion hr3(3, &oa, heap); + HeapRegion hr4(4, &oa, heap); + l.add_ordered(&hr1); + l.add_ordered(&hr0); + l.add_ordered(&hr3); + l.add_ordered(&hr4); + l.add_ordered(&hr2); + assert(l.length() == num_regions_in_test, "wrong length"); + l.verify_list(); + + bot_storage->uncommit_regions(0, num_regions_in_test); + delete bot_storage; + FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC); +} --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -81,7 +81,7 @@ friend class VMStructs; private: bool _is_humongous; - bool _is_empty; + bool _is_free; HRSMtSafeChecker* _mt_safety_checker; protected: @@ -102,9 +102,9 @@ // not. Only used during verification. bool regions_humongous() { return _is_humongous; } - // Indicates whether all regions in the set should be empty or + // Indicates whether all regions in the set should be free or // not. Only used during verification. - bool regions_empty() { return _is_empty; } + bool regions_free() { return _is_free; } void check_mt_safety() { if (_mt_safety_checker != NULL) { @@ -114,12 +114,12 @@ virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { } - HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker); + HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker); public: const char* name() { return _name; } - uint length() { return _count.length(); } + uint length() const { return _count.length(); } bool is_empty() { return _count.length() == 0; } @@ -162,7 +162,7 @@ // diagnosing failures. class hrs_ext_msg : public hrs_err_msg { public: - hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") { + hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") { set->fill_in_ext_msg(this, message); } }; @@ -171,7 +171,7 @@ do { \ assert(((_set1_)->regions_humongous() == \ (_set2_)->regions_humongous()) && \ - ((_set1_)->regions_empty() == (_set2_)->regions_empty()), \ + ((_set1_)->regions_free() == (_set2_)->regions_free()), \ hrs_err_msg("the contents of set %s and set %s should match", \ (_set1_)->name(), (_set2_)->name())); \ } while (0) @@ -184,7 +184,7 @@ class HeapRegionSet : public HeapRegionSetBase { public: HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker): - HeapRegionSetBase(name, humongous, false /* empty */, mt_safety_checker) { } + HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { } void bulk_remove(const HeapRegionSetCount& removed) { _count.decrement(removed.length(), removed.capacity()); @@ -192,13 +192,9 @@ }; // A set that links all the regions added to it in a doubly-linked -// list. We should try to avoid doing operations that iterate over +// sorted list. 
We should try to avoid doing operations that iterate over // such lists in performance critical paths. Typically we should -// add / remove one region at a time or concatenate two lists. There are -// two ways to treat your lists, ordered and un-ordered. All un-ordered -// operations are done in constant time. To keep a list ordered only use -// add_ordered() to add elements to the list. If a list is not ordered -// from start, there is no way to sort it later. +// add / remove one region at a time or concatenate two lists. class FreeRegionListIterator; @@ -210,13 +206,13 @@ HeapRegion* _tail; // _last is used to keep track of where we added an element the last - // time in ordered lists. It helps to improve performance when adding - // several ordered items in a row. + // time. It helps to improve performance when adding several ordered items in a row. HeapRegion* _last; static uint _unrealistically_long_length; - void add_as_head_or_tail(FreeRegionList* from_list, bool as_head); + inline HeapRegion* remove_from_head_impl(); + inline HeapRegion* remove_from_tail_impl(); protected: virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg); @@ -232,65 +228,33 @@ void verify_list(); - HeapRegion* head() { return _head; } - HeapRegion* tail() { return _tail; } +#ifdef ASSERT + bool contains(HeapRegion* hr) const { + return hr->containing_set() == this; + } +#endif static void set_unrealistically_long_length(uint len); // Add hr to the list. The region should not be a member of another set. // Assumes that the list is ordered and will preserve that order. The order - // is determined by hrs_index. + // is determined by hrm_index. inline void add_ordered(HeapRegion* hr); - // It adds hr to the list as the new head. The region should not be - // a member of another set. - inline void add_as_head(HeapRegion* hr); - - // It adds hr to the list as the new tail. The region should not be - // a member of another set. - inline void add_as_tail(HeapRegion* hr); - - // It removes and returns the head of the list. It assumes that the - // list is not empty so it will return a non-NULL value. - inline HeapRegion* remove_head(); - - // Convenience method. - inline HeapRegion* remove_head_or_null(); - - // Removes and returns the last element (_tail) of the list. It assumes - // that the list isn't empty so that it can return a non-NULL value. - inline HeapRegion* remove_tail(); - - // Convenience method - inline HeapRegion* remove_tail_or_null(); - // Removes from head or tail based on the given argument. - inline HeapRegion* remove_region(bool from_head); + HeapRegion* remove_region(bool from_head); // Merge two ordered lists. The result is also ordered. The order is - // determined by hrs_index. + // determined by hrm_index. void add_ordered(FreeRegionList* from_list); - // It moves the regions from from_list to this list and empties - // from_list. The new regions will appear in the same order as they - // were in from_list and be linked in the beginning of this list. - void add_as_head(FreeRegionList* from_list); - - // It moves the regions from from_list to this list and empties - // from_list. The new regions will appear in the same order as they - // were in from_list and be linked in the end of this list. - void add_as_tail(FreeRegionList* from_list); - // It empties the list by removing all regions from it. void remove_all(); - // It removes all regions in the list that are pending for removal - // (i.e., they have been tagged with "pending_removal"). 
The list - // must not be empty, target_count should reflect the exact number - // of regions that are pending for removal in the list, and - // target_count should be > 1 (currently, we never need to remove a - // single region using this). - void remove_all_pending(uint target_count); + // Remove all (contiguous) regions from first to first + num_regions -1 from + // this list. + // Num_regions must be > 1. + void remove_starting_at(HeapRegion* first, uint num_regions); virtual void verify(); @@ -298,7 +262,7 @@ }; // Iterator class that provides a convenient way to iterate over the -// regions of a HeapRegionLinkedList instance. +// regions of a FreeRegionList. class FreeRegionListIterator : public StackObj { private: @@ -324,7 +288,7 @@ } FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) { - _curr = list->head(); + _curr = list->_head; } }; --- ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,7 +30,8 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) { check_mt_safety(); assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u")); - assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked")); + assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked")); + assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked")); _count.increment(1u, hr->capacity()); hr->set_containing_set(this); @@ -40,7 +41,8 @@ inline void HeapRegionSetBase::remove(HeapRegion* hr) { check_mt_safety(); verify_region(hr); - assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked")); + assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked")); + assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked")); hr->set_containing_set(NULL); assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition")); @@ -48,8 +50,7 @@ } inline void FreeRegionList::add_ordered(HeapRegion* hr) { - check_mt_safety(); - assert((length() == 0 && _head == NULL && _tail == NULL) || + assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) || (length() > 0 && _head != NULL && _tail != NULL), hrs_ext_msg(this, "invariant")); // add() will verify the region and check mt safety. @@ -59,14 +60,14 @@ if (_head != NULL) { HeapRegion* curr; - if (_last != NULL && _last->hrs_index() < hr->hrs_index()) { + if (_last != NULL && _last->hrm_index() < hr->hrm_index()) { curr = _last; } else { curr = _head; } // Find first entry with a Region Index larger than entry to insert. - while (curr != NULL && curr->hrs_index() < hr->hrs_index()) { + while (curr != NULL && curr->hrm_index() < hr->hrm_index()) { curr = curr->next(); } @@ -95,55 +96,48 @@ _last = hr; } -inline void FreeRegionList::add_as_head(HeapRegion* hr) { - assert((length() == 0 && _head == NULL && _tail == NULL) || - (length() > 0 && _head != NULL && _tail != NULL), - hrs_ext_msg(this, "invariant")); - // add() will verify the region and check mt safety. - add(hr); - - // Now link the region. 
- if (_head != NULL) { - hr->set_next(_head); - _head->set_prev(hr); - } else { - _tail = hr; - } - _head = hr; -} - -inline void FreeRegionList::add_as_tail(HeapRegion* hr) { - check_mt_safety(); - assert((length() == 0 && _head == NULL && _tail == NULL) || - (length() > 0 && _head != NULL && _tail != NULL), - hrs_ext_msg(this, "invariant")); - // add() will verify the region and check mt safety. - add(hr); - - // Now link the region. - if (_tail != NULL) { - _tail->set_next(hr); - hr->set_prev(_tail); - } else { - _head = hr; - } - _tail = hr; -} - -inline HeapRegion* FreeRegionList::remove_head() { - assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty")); - assert(length() > 0 && _head != NULL && _tail != NULL, - hrs_ext_msg(this, "invariant")); - - // We need to unlink it first. - HeapRegion* hr = _head; - _head = hr->next(); +inline HeapRegion* FreeRegionList::remove_from_head_impl() { + HeapRegion* result = _head; + _head = result->next(); if (_head == NULL) { _tail = NULL; } else { _head->set_prev(NULL); } - hr->set_next(NULL); + result->set_next(NULL); + return result; +} + +inline HeapRegion* FreeRegionList::remove_from_tail_impl() { + HeapRegion* result = _tail; + + _tail = result->prev(); + if (_tail == NULL) { + _head = NULL; + } else { + _tail->set_next(NULL); + } + result->set_prev(NULL); + return result; +} + +inline HeapRegion* FreeRegionList::remove_region(bool from_head) { + check_mt_safety(); + verify_optional(); + + if (is_empty()) { + return NULL; + } + assert(length() > 0 && _head != NULL && _tail != NULL, + hrs_ext_msg(this, "invariant")); + + HeapRegion* hr; + + if (from_head) { + hr = remove_from_head_impl(); + } else { + hr = remove_from_tail_impl(); + } if (_last == hr) { _last = NULL; @@ -154,56 +148,5 @@ return hr; } -inline HeapRegion* FreeRegionList::remove_head_or_null() { - check_mt_safety(); - if (!is_empty()) { - return remove_head(); - } else { - return NULL; - } -} +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP -inline HeapRegion* FreeRegionList::remove_tail() { - assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty")); - assert(length() > 0 && _head != NULL && _tail != NULL, - hrs_ext_msg(this, "invariant")); - - // We need to unlink it first - HeapRegion* hr = _tail; - - _tail = hr->prev(); - if (_tail == NULL) { - _head = NULL; - } else { - _tail->set_next(NULL); - } - hr->set_prev(NULL); - - if (_last == hr) { - _last = NULL; - } - - // remove() will verify the region and check mt safety. - remove(hr); - return hr; -} - -inline HeapRegion* FreeRegionList::remove_tail_or_null() { - check_mt_safety(); - - if (!is_empty()) { - return remove_tail(); - } else { - return NULL; - } -} - -inline HeapRegion* FreeRegionList::remove_region(bool from_head) { - if (from_head) { - return remove_head_or_null(); - } else { - return remove_tail_or_null(); - } -} - -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/heapRegionType.hpp" + +bool HeapRegionType::is_valid(Tag tag) { + switch (tag) { + case FreeTag: + case EdenTag: + case SurvTag: + case HumStartsTag: + case HumContTag: + case OldTag: + return true; + } + return false; +} + +const char* HeapRegionType::get_str() const { + hrt_assert_is_valid(_tag); + switch (_tag) { + case FreeTag: return "FREE"; + case EdenTag: return "EDEN"; + case SurvTag: return "SURV"; + case HumStartsTag: return "HUMS"; + case HumContTag: return "HUMC"; + case OldTag: return "OLD"; + } + ShouldNotReachHere(); + // keep some compilers happy + return NULL; +} + +const char* HeapRegionType::get_short_str() const { + hrt_assert_is_valid(_tag); + switch (_tag) { + case FreeTag: return "F"; + case EdenTag: return "E"; + case SurvTag: return "S"; + case HumStartsTag: return "HS"; + case HumContTag: return "HC"; + case OldTag: return "O"; + } + ShouldNotReachHere(); + // keep some compilers happy + return NULL; +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP + +#include "memory/allocation.hpp" + +#define hrt_assert_is_valid(tag) \ + assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag))) + +class HeapRegionType VALUE_OBJ_CLASS_SPEC { +private: + // We encode the value of the heap region type so the generation can be + // determined quickly. The tag is split into two parts: + // + // major type (young, humongous) : top N-1 bits + // minor type (eden / survivor, starts / cont hum, etc.) 
: bottom 1 bit + // + // If there's need to increase the number of minor types in the + // future, we'll have to increase the size of the latter and hence + // decrease the size of the former. + // + // 0000 0 [ 0] Free + // + // 0001 0 Young Mask + // 0001 0 [ 2] Eden + // 0001 1 [ 3] Survivor + // + // 0010 0 Humongous Mask + // 0010 0 [ 4] Humongous Starts + // 0010 1 [ 5] Humongous Continues + // + // 01000 [ 8] Old + typedef enum { + FreeTag = 0, + + YoungMask = 2, + EdenTag = YoungMask, + SurvTag = YoungMask + 1, + + HumMask = 4, + HumStartsTag = HumMask, + HumContTag = HumMask + 1, + + OldTag = 8 + } Tag; + + volatile Tag _tag; + + static bool is_valid(Tag tag); + + Tag get() const { + hrt_assert_is_valid(_tag); + return _tag; + } + + // Sets the type to 'tag'. + void set(Tag tag) { + hrt_assert_is_valid(tag); + hrt_assert_is_valid(_tag); + _tag = tag; + } + + // Sets the type to 'tag', expecting the type to be 'before'. This + // is available for when we want to add sanity checking to the type + // transition. + void set_from(Tag tag, Tag before) { + hrt_assert_is_valid(tag); + hrt_assert_is_valid(before); + hrt_assert_is_valid(_tag); + assert(_tag == before, + err_msg("HR tag: %u, expected: %u new tag; %u", _tag, before, tag)); + _tag = tag; + } + +public: + // Queries + + bool is_free() const { return get() == FreeTag; } + + bool is_young() const { return (get() & YoungMask) != 0; } + bool is_eden() const { return get() == EdenTag; } + bool is_survivor() const { return get() == SurvTag; } + + bool is_humongous() const { return (get() & HumMask) != 0; } + bool is_starts_humongous() const { return get() == HumStartsTag; } + bool is_continues_humongous() const { return get() == HumContTag; } + + bool is_old() const { return get() == OldTag; } + + // Setters + + void set_free() { set(FreeTag); } + + void set_eden() { set_from(EdenTag, FreeTag); } + void set_eden_pre_gc() { set_from(EdenTag, SurvTag); } + void set_survivor() { set_from(SurvTag, FreeTag); } + + void set_starts_humongous() { set_from(HumStartsTag, FreeTag); } + void set_continues_humongous() { set_from(HumContTag, FreeTag); } + + void set_old() { set(OldTag); } + + // Misc + + const char* get_str() const; + const char* get_short_str() const; + + HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -285,37 +285,6 @@ _par_closures[i] = par_closure; } -void SATBMarkQueueSet::iterate_closure_all_threads() { - for(JavaThread* t = Threads::first(); t; t = t->next()) { - t->satb_mark_queue().apply_closure_and_empty(_closure); - } - shared_satb_queue()->apply_closure_and_empty(_closure); -} - -void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) { - SharedHeap* sh = SharedHeap::heap(); - int parity = sh->strong_roots_parity(); - - for(JavaThread* t = Threads::first(); t; t = t->next()) { - if (t->claim_oops_do(true, parity)) { - t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]); - } - } - - // We also need to claim the VMThread so that its parity is updated - // otherwise the next call to Thread::possibly_parallel_oops_do inside - // a StrongRootsScope might skip the VMThread because it has a stale - // parity that matches the parity set by the StrongRootsScope - // - // Whichever worker succeeds in claiming the VMThread gets to do - 
// the shared queue. - - VMThread* vmt = VMThread::vm_thread(); - if (vmt->claim_oops_do(true, parity)) { - shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]); - } -} - bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par, uint worker) { BufferNode* nd = NULL; --- ./hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,7 +33,9 @@ // A ptrQueue whose elements are "oops", pointers to object heads. class ObjPtrQueue: public PtrQueue { + friend class Threads; friend class SATBMarkQueueSet; + friend class G1RemarkThreadsClosure; private: // Filter out unwanted entries from the buffer. @@ -118,13 +120,6 @@ // closures, one for each parallel GC thread. void set_par_closure(int i, ObjectClosure* closure); - // Apply the registered closure to all entries on each - // currently-active buffer and then empty the buffer. It should only - // be called serially and at a safepoint. - void iterate_closure_all_threads(); - // Parallel version of the above. - void par_iterate_closure_all_threads(uint worker); - // If there exists some completed buffer, pop it, then apply the // registered closure to all its elements, and return true. If no // completed buffers exist, return false. --- ./hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -370,7 +370,7 @@ } size_t RSHashTable::mem_size() const { - return sizeof(this) + + return sizeof(RSHashTable) + capacity() * (SparsePRTEntry::size() + sizeof(int)); } @@ -472,13 +472,13 @@ size_t SparsePRT::mem_size() const { // We ignore "_cur" here, because it either = _next, or else it is // on the deleted list. 
- return sizeof(this) + _next->mem_size(); + return sizeof(SparsePRT) + _next->mem_size(); } bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) { #if SPARSE_PRT_VERBOSE gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.", - card_index, region_id, _hr->hrs_index()); + card_index, region_id, _hr->hrm_index()); #endif if (_next->occupied_entries() * 2 > _next->capacity()) { expand(); @@ -530,7 +530,7 @@ #if SPARSE_PRT_VERBOSE gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.", - _hr->hrs_index(), _next->capacity()); + _hr->hrm_index(), _next->capacity()); #endif for (size_t i = 0; i < last->capacity(); i++) { SparsePRTEntry* e = last->entry((int)i); --- ./hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,7 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP #include "gc_implementation/g1/heapRegion.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #define VM_STRUCTS_G1(nonstatic_field, static_field) \ @@ -34,21 +34,24 @@ static_field(HeapRegion, GrainBytes, size_t) \ static_field(HeapRegion, LogOfHRGrainBytes, int) \ \ + nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord*) \ + \ nonstatic_field(G1HeapRegionTable, _base, address) \ nonstatic_field(G1HeapRegionTable, _length, size_t) \ nonstatic_field(G1HeapRegionTable, _biased_base, address) \ nonstatic_field(G1HeapRegionTable, _bias, size_t) \ nonstatic_field(G1HeapRegionTable, _shift_by, uint) \ \ - nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \ - nonstatic_field(HeapRegionSeq, _committed_length, uint) \ + nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \ + nonstatic_field(HeapRegionManager, _num_committed, uint) \ \ - nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \ - nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \ - nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \ + nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \ + \ + nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \ nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \ nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \ nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \ + nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \ \ nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \ nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \ @@ -69,15 +72,18 @@ \ declare_type(G1CollectedHeap, SharedHeap) \ \ - declare_type(HeapRegion, ContiguousSpace) \ - declare_toplevel_type(HeapRegionSeq) \ + declare_type(G1OffsetTableContigSpace, CompactibleSpace) \ + declare_type(HeapRegion, G1OffsetTableContigSpace) \ + declare_toplevel_type(HeapRegionManager) \ declare_toplevel_type(HeapRegionSetBase) \ declare_toplevel_type(HeapRegionSetCount) \ declare_toplevel_type(G1MonitoringSupport) \ + declare_toplevel_type(G1Allocator) \ \ declare_toplevel_type(G1CollectedHeap*) \ declare_toplevel_type(HeapRegion*) \ declare_toplevel_type(G1MonitoringSupport*) \ + declare_toplevel_type(G1Allocator*) \ #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP --- ./hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 
Wed Feb 04 12:14:39 2015 -0800 @@ -45,7 +45,8 @@ void VM_G1CollectForAllocation::doit() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); GCCauseSetter x(g1h, _gc_cause); - _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded); + + _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded); assert(_result == NULL || _pause_succeeded, "if we get back a result, the pause should have succeeded"); } @@ -94,12 +95,13 @@ assert(!_should_initiate_conc_mark || ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) || - _gc_cause == GCCause::_g1_humongous_allocation), - "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle"); + _gc_cause == GCCause::_g1_humongous_allocation || + _gc_cause == GCCause::_update_allocation_context_stats_inc), + "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle"); if (_word_size > 0) { // An allocation has been requested. So, try to do that first. - _result = g1h->attempt_allocation_at_safepoint(_word_size, + _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(), false /* expect_null_cur_alloc_region */); if (_result != NULL) { // If we can successfully allocate before we actually do the @@ -152,7 +154,7 @@ g1h->do_collection_pause_at_safepoint(_target_pause_time_ms); if (_pause_succeeded && _word_size > 0) { // An allocation had been requested. - _result = g1h->attempt_allocation_at_safepoint(_word_size, + _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(), true /* expect_null_cur_alloc_region */); } else { assert(_result == NULL, "invariant"); @@ -211,8 +213,12 @@ assert(_needs_pll, "don't call this otherwise"); // The caller may block while communicating // with the SLT thread in order to acquire/release the PLL. - ConcurrentMarkThread::slt()-> - manipulatePLL(SurrogateLockerThread::acquirePLL); + SurrogateLockerThread* slt = ConcurrentMarkThread::slt(); + if (slt != NULL) { + slt->manipulatePLL(SurrogateLockerThread::acquirePLL); + } else { + SurrogateLockerThread::report_missing_slt(); + } } void VM_CGC_Operation::release_and_notify_pending_list_lock() { @@ -226,7 +232,7 @@ void VM_CGC_Operation::doit() { gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm()); + GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()); SharedHeap* sh = SharedHeap::heap(); // This could go away if CollectedHeap gave access to _gc_is_active... if (sh != NULL) { --- ./hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP +#include "gc_implementation/g1/g1AllocationContext.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" // VM_operations for the G1 collector. 
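An aside on the two sparsePRT.cpp hunks above: they fix an easy-to-miss accounting bug. Inside a member function, sizeof(this) is the size of the pointer (4 or 8 bytes), not of the object, so each table's fixed footprint was being counted as pointer-size rather than object-size. A minimal standalone illustration of the pitfall (the struct and its names are made up for this sketch, not taken from HotSpot):

  #include <cstddef>
  #include <cstdio>

  struct Table {
    long entries[16];
    // Buggy: sizeof(this) is sizeof(Table*), i.e. 8 bytes on an LP64 target.
    size_t mem_size_buggy() const { return sizeof(this); }
    // Fixed: name the type (or use sizeof(*this)) to count the whole object.
    size_t mem_size_fixed() const { return sizeof(Table); }
  };

  int main() {
    Table t;
    printf("%zu vs %zu\n", t.mem_size_buggy(), t.mem_size_fixed()); // e.g. 8 vs 128
    return 0;
  }

The patch spells out the concrete type (sizeof(RSHashTable), sizeof(SparsePRT)), which is equivalent to sizeof(*this) here and keeps the expression self-documenting.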
@@ -40,6 +41,7 @@ size_t _word_size; HeapWord* _result; bool _pause_succeeded; + AllocationContext_t _allocation_context; public: VM_G1OperationWithAllocRequest(unsigned int gc_count_before, @@ -49,6 +51,8 @@ _word_size(word_size), _result(NULL), _pause_succeeded(false) { } HeapWord* result() { return _result; } bool pause_succeeded() { return _pause_succeeded; } + void set_allocation_context(AllocationContext_t context) { _allocation_context = context; } + AllocationContext_t allocation_context() { return _allocation_context; } }; class VM_G1CollectFull: public VM_GC_Operation { @@ -56,7 +60,7 @@ VM_G1CollectFull(unsigned int gc_count_before, unsigned int full_gc_count_before, GCCause::Cause cause) - : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { } + : VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { } virtual VMOp_Type type() const { return VMOp_G1CollectFull; } virtual void doit(); virtual const char* name() const { --- ./hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/virtualspace.hpp" #include "runtime/vmThread.hpp" --- ./hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,12 +28,12 @@ #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/shared/adaptiveSizePolicy.hpp" #include "gc_implementation/shared/ageTable.hpp" -#include "gc_implementation/shared/parGCAllocBuffer.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" -#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/genCollectedHeap.hpp" @@ -50,7 +50,7 @@ #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" -#include "runtime/thread.hpp" +#include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/workgroup.hpp" @@ -251,7 +251,7 @@ plab->set_word_size(buf_size); plab->set_buf(buf_space); record_survivor_plab(buf_space, buf_size); - obj = plab->allocate(word_sz); + obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes); // Note that we cannot compare buf_size < word_sz below // because of AlignmentReserve (see ParGCAllocBuffer::allocate()). 
assert(obj != NULL || plab->words_remaining() < word_sz, @@ -613,20 +613,21 @@ KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(), gch->rem_set()->klass_rem_set()); - - int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure, + &par_scan_state.to_space_root_closure(), + false); par_scan_state.start_strong_roots(); - gch->gen_process_strong_roots(_gen->level(), - true, // Process younger gens, if any, - // as strong roots. - false, // no scope; this is parallel code - true, // is scavenging - SharedHeap::ScanningOption(so), - &par_scan_state.to_space_root_closure(), - true, // walk *all* scavengable nmethods - &par_scan_state.older_gen_closure(), - &klass_scan_closure); + gch->gen_process_roots(_gen->level(), + true, // Process younger gens, if any, + // as strong roots. + false, // no scope; this is parallel code + SharedHeap::SO_ScavengeCodeCache, + GenCollectedHeap::StrongAndWeakRoots, + &par_scan_state.to_space_root_closure(), + &par_scan_state.older_gen_closure(), + &cld_scan_closure); + par_scan_state.end_strong_roots(); // "evacuate followers". @@ -957,7 +958,7 @@ size_policy->minor_collection_begin(); } - GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); @@ -1015,14 +1016,14 @@ ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, &task_executor, - _gc_timer); + _gc_timer, gc_tracer.gc_id()); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, NULL, - _gc_timer); + _gc_timer, gc_tracer.gc_id()); } gc_tracer.report_gc_reference_stats(stats); if (!promotion_failed()) { --- ./hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,7 +69,7 @@ ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier - // One of these two will be passed to process_strong_roots, which will + // One of these two will be passed to process_roots, which will // set its generation. The first is for two-gen configs where the // old gen collects the perm gen; the second is for arbitrary configs. 
// The second isn't used right now (it used to be used for the train, an @@ -168,7 +168,7 @@ HeapWord* alloc_in_to_space_slow(size_t word_sz); HeapWord* alloc_in_to_space(size_t word_sz) { - HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz); + HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes); if (obj != NULL) return obj; else return alloc_in_to_space_slow(word_sz); } --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,6 +30,7 @@ #include "gc_implementation/parallelScavenge/psYoungGen.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.psgc.inline.hpp" +#include "runtime/prefetch.inline.hpp" // Checks an individual oop for missing precise marks. Mark // may be either dirty or newgen. --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,6 +30,7 @@ #include "memory/allocation.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -71,7 +71,7 @@ if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) { _region_start = covered_region.start(); _region_size = covered_region.word_size(); - idx_t* map = (idx_t*)_virtual_space->reserved_low_addr(); + BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr(); _beg_bits.set_map(map); _beg_bits.set_size(bits / 2); _end_bits.set_map(map + words / 2); --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -78,6 +78,7 @@ (HeapWord*)(heap_rs.base() + heap_rs.size())); CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3); + barrier_set->initialize(); _barrier_set = barrier_set; oopDesc::set_bs(_barrier_set); if (_barrier_set == NULL) { @@ -484,10 +485,6 @@ young_gen()->eden_space()->ensure_parsability(); } -size_t ParallelScavengeHeap::unsafe_max_alloc() { - return young_gen()->eden_space()->free_in_bytes(); -} - size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const { return young_gen()->eden_space()->tlab_capacity(thr); } --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -184,8 +184,6 @@ void accumulate_statistics_all_tlabs(); void resize_all_tlabs(); - size_t unsafe_max_alloc(); - bool supports_tlab_allocation() const { return true; } size_t tlab_capacity(Thread* thr) const; --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -53,13 +53,13 @@ ResourceMark rm; NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask", - 
PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true); - CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true); + MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations); if (_java_thread != NULL) _java_thread->oops_do( @@ -82,7 +82,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -100,7 +100,7 @@ case threads: { ResourceMark rm; - CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true); + MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations); CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure); Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob); } @@ -153,7 +153,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("RefProcTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -209,7 +209,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("StealMarkingTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -242,7 +242,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -309,7 +309,7 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) { NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -324,7 +324,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Dec 08 12:28:35 2014 
-0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -169,7 +169,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -513,7 +513,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace(" 1"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -528,14 +528,14 @@ Universe::oops_do(mark_and_push_closure()); JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure()); - CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true); + MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations); Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob); ObjectSynchronizer::oops_do(mark_and_push_closure()); FlatProfiler::oops_do(mark_and_push_closure()); Management::oops_do(mark_and_push_closure()); JvmtiExport::oops_do(mark_and_push_closure()); SystemDictionary::always_strong_oops_do(mark_and_push_closure()); - ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true); + ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure()); // Do not treat nmethods as strong roots for mark/sweep, since we can unload them. //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure())); } @@ -548,7 +548,7 @@ ref_processor()->setup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( - is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer); + is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id()); gc_tracer()->report_gc_reference_stats(stats); } @@ -574,7 +574,7 @@ void PSMarkSweep::mark_sweep_phase2() { - GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("2"); // Now all live objects are marked, compute the new object addresses. @@ -604,7 +604,7 @@ void PSMarkSweep::mark_sweep_phase3() { // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("3"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -625,16 +625,16 @@ FlatProfiler::oops_do(adjust_pointer_closure()); Management::oops_do(adjust_pointer_closure()); JvmtiExport::oops_do(adjust_pointer_closure()); - // SO_AllClasses SystemDictionary::oops_do(adjust_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); + ClassLoaderDataGraph::cld_do(adjust_cld_closure()); // Now adjust pointers in remaining weak roots. 
(All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); - CodeCache::oops_do(adjust_pointer_closure()); + CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); + CodeCache::blobs_do(&adjust_from_blobs); StringTable::oops_do(adjust_pointer_closure()); ref_processor()->weak_oops_do(adjust_pointer_closure()); PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); @@ -647,7 +647,7 @@ void PSMarkSweep::mark_sweep_phase4() { EventMark m("4 compact heap"); - GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("4"); // All pointers are now adjusted, move objects accordingly --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -40,11 +40,11 @@ static CollectorCounters* _counters; // Closure accessors - static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; } - static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; } - static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } - static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } - static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; } + static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; } + static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } + static CLDClosure* follow_cld_closure() { return &MarkSweep::follow_cld_closure; } + static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } + static CLDClosure* adjust_cld_closure() { return &MarkSweep::adjust_cld_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; } debug_only(public:) // Used for PSParallelCompact debugging --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "oops/oop.inline.hpp" +#include "runtime/prefetch.inline.hpp" PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL; --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -978,7 +978,7 @@ // at each young gen gc. Do the update unconditionally (even though a // promotion failure does not swap spaces) because an unknown number of minor // collections will have swapped the spaces an unknown number of times. 
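// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the code-cache pattern the hunks
// above switch to. CodeCache::oops_do(adjust_pointer_closure()) is replaced by
// wrapping the oop closure in a CodeBlobToOopClosure constructed with the
// named FixRelocations constant (so intent is explicit instead of a bare
// bool), then walking all blobs with CodeCache::blobs_do. The helper name is
// hypothetical; the types and calls are the ones this patch itself introduces.
#include "code/codeCache.hpp"
#include "memory/iterator.hpp"

static void adjust_code_cache_roots(OopClosure* adjust) {
  // Fix embedded relocations as well as oops while adjusting pointers.
  CodeBlobToOopClosure adjust_from_blobs(adjust, CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);  // visit every blob, not just roots
}
// ---------------------------------------------------------------------------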
- GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); + GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = gc_heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -1021,7 +1021,7 @@ void PSParallelCompact::post_compact() { - GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); + GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. @@ -1847,7 +1847,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // trace("2"); #ifdef ASSERT @@ -2056,7 +2056,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -2351,7 +2351,7 @@ bool maximum_heap_compaction, ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = gc_heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); @@ -2366,7 +2366,7 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); + GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap::ParStrongRootsScope psrs; @@ -2395,24 +2395,24 @@ // Process reference objects found during marking { - GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); + GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, - &task_executor, &_gc_timer); + &task_executor, &_gc_timer, _gc_tracer.gc_id()); } else { stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, - &_gc_timer); + &_gc_timer, _gc_tracer.gc_id()); } gc_tracer->report_gc_reference_stats(stats); } - GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); + GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // This is the point where the entire marking should have completed. assert(cm->marking_stacks_empty(), "Marking should have completed"); @@ -2451,7 +2451,7 @@ void PSParallelCompact::adjust_roots() { // Adjust the pointers to reflect the new locations - GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer); + GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Need new claim bits when tracing through and adjusting pointers. 
ClassLoaderDataGraph::clear_claimed_marks(); @@ -2465,7 +2465,6 @@ FlatProfiler::oops_do(adjust_pointer_closure()); Management::oops_do(adjust_pointer_closure()); JvmtiExport::oops_do(adjust_pointer_closure()); - // SO_AllClasses SystemDictionary::oops_do(adjust_pointer_closure()); ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); @@ -2474,7 +2473,8 @@ // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); - CodeCache::oops_do(adjust_pointer_closure()); + CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); + CodeCache::blobs_do(&adjust_from_blobs); StringTable::oops_do(adjust_pointer_closure()); ref_processor()->weak_oops_do(adjust_pointer_closure()); // Roots were visited so references into the young gen in roots @@ -2487,7 +2487,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Find the threads that are active unsigned int which = 0; @@ -2561,7 +2561,7 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelCompactData& sd = PSParallelCompact::summary_data(); @@ -2643,7 +2643,7 @@ GCTaskQueue* q, ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads) { - GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Once a thread has drained its stack, it should try to steal regions from // other threads. @@ -2691,7 +2691,7 @@ void PSParallelCompact::compact() { // trace("5"); - GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); @@ -2708,7 +2708,7 @@ enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); { - GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer); + GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); gc_task_manager()->execute_and_wait(q); @@ -2722,7 +2722,7 @@ { // Update the deferred objects, if any. Any compaction manager can be used.
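// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the drain-then-steal discipline
// behind the "steal task setup" hunk above, reduced to plain single-threaded
// C++ so it stands alone. The real code uses lock-free task queues, random
// victim selection, and a ParallelTaskTerminator protocol; all names here are
// illustrative, not HotSpot API.
#include <cstddef>
#include <deque>

struct WorkerQueue {
  std::deque<size_t> regions;                 // this worker's region stack
  bool pop_local(size_t& r) {                 // LIFO end: best cache locality
    if (regions.empty()) return false;
    r = regions.back(); regions.pop_back();
    return true;
  }
  bool steal_from(WorkerQueue& victim, size_t& r) {  // FIFO end of the victim
    if (victim.regions.empty()) return false;
    r = victim.regions.front(); victim.regions.pop_front();
    return true;
  }
};

// A worker first drains its own queue, then lives off stolen regions; the
// real termination check is cooperative across all workers.
static void drain_then_steal(WorkerQueue& self, WorkerQueue& victim,
                             void (*fill_region)(size_t)) {
  size_t r;
  while (self.pop_local(r))          fill_region(r);  // drain own stack
  while (self.steal_from(victim, r)) fill_region(r);  // then steal
}
// ---------------------------------------------------------------------------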
- GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer); + GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParCompactionManager* cm = ParCompactionManager::manager_array(0); for (unsigned int id = old_space_id; id < last_space_id; ++id) { update_deferred_objects(cm, SpaceId(id)); --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1004,6 +1004,10 @@ static bool _dwl_initialized; #endif // #ifdef ASSERT + + public: + static ParallelOldTracer* gc_tracer() { return &_gc_tracer; } + private: static void initialize_space_info(); --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP #include "gc_implementation/parallelScavenge/objectStartArray.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "memory/allocation.hpp" // @@ -94,23 +95,9 @@ PSYoungPromotionLAB() { } // Not MT safe - HeapWord* allocate(size_t size) { - // Can't assert this, when young fills, we keep the LAB around, but flushed. - // assert(_state != flushed, "Sanity"); - HeapWord* obj = top(); - HeapWord* new_top = obj + size; - // The 'new_top>obj' check is needed to detect overflow of obj+size. - if (new_top > obj && new_top <= end()) { - set_top(new_top); - assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top), - "checking alignment"); - return obj; - } + inline HeapWord* allocate(size_t size); - return NULL; - } - - debug_only(virtual bool lab_is_valid(MemRegion lab)); + debug_only(virtual bool lab_is_valid(MemRegion lab);) }; class PSOldPromotionLAB : public PSPromotionLAB { --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP + +#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp" +#include "gc_interface/collectedHeap.inline.hpp" + +HeapWord* PSYoungPromotionLAB::allocate(size_t size) { + // Can't assert this, when young fills, we keep the LAB around, but flushed. + // assert(_state != flushed, "Sanity"); + HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end(), SurvivorAlignmentInBytes); + if (obj == NULL) { + return NULL; + } + + HeapWord* new_top = obj + size; + // The 'new_top>obj' check is needed to detect overflow of obj+size. + if (new_top > obj && new_top <= end()) { + set_top(new_top); + assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_object_aligned((intptr_t)new_top), + "checking alignment"); + return obj; + } else { + set_top(obj); + return NULL; + } +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,6 +27,7 @@ #include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" +#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "oops/oop.psgc.inline.hpp" --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -331,7 +331,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); @@ -397,7 +397,7 @@ // We'll use the promotion manager again later. 
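// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: the arithmetic behind the
// CollectedHeap::align_allocation_or_fail call in the psPromotionLAB.inline.hpp
// hunk above, assuming it rounds the LAB top up to the survivor alignment and
// gives up when the rounded start runs past end. The real helper must also
// fill the skipped gap with a dummy object so the heap stays parseable; that
// part is omitted. Plain C++, illustrative names only.
#include <cstddef>
#include <cstdint>

// align_bytes must be a power of two (SurvivorAlignmentInBytes is).
static inline void* align_up_or_fail(void* top, void* end, size_t align_bytes) {
  uintptr_t t = (uintptr_t)top;
  uintptr_t aligned = (t + (align_bytes - 1)) & ~(uintptr_t)(align_bytes - 1);
  // NULL tells the caller to retire this LAB and take the slow path.
  return (aligned <= (uintptr_t)end) ? (void*)aligned : NULL;
}
// ---------------------------------------------------------------------------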
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { - GCTraceTime tm("Scavenge", false, false, &_gc_timer); + GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -439,7 +439,7 @@ // Process reference objects discovered during scavenge { - GCTraceTime tm("References", false, false, &_gc_timer); + GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id()); reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); @@ -450,10 +450,10 @@ PSRefProcTaskExecutor task_executor; stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, - &_gc_timer); + &_gc_timer, _gc_tracer.gc_id()); } else { stats = reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); + &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id()); } _gc_tracer.report_gc_reference_stats(stats); @@ -468,7 +468,7 @@ } { - GCTraceTime tm("StringTable", false, false, &_gc_timer); + GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id()); // Unlink any dead interned Strings and process the remaining live ones. PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); @@ -638,7 +638,7 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); { - GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); + GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id()); CodeCache::prune_scavenge_root_nmethods(); } --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -65,7 +65,7 @@ case threads: { ResourceMark rm; - CLDToOopClosure* cld_closure = NULL; // Not needed. All CLDs are already visited. + CLDClosure* cld_closure = NULL; // Not needed. All CLDs are already visited. Threads::oops_do(&roots_closure, cld_closure, NULL); } break; @@ -100,7 +100,7 @@ case code_cache: { - CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true); + MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob); } break; @@ -122,8 +122,8 @@ PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which); PSScavengeRootsClosure roots_closure(pm); - CLDToOopClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited. - CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true); + CLDClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited. 
+ MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations); if (_java_thread != NULL) _java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs); --- ./hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -70,7 +70,7 @@ "must be a ParallelScavengeHeap"); GCCauseSetter gccs(heap, _gc_cause); - if (_gc_cause == GCCause::_gc_locker + if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) { // If (and only if) the scavenge fails, this will invoke a full gc. heap->invoke_scavenge(); --- ./hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -36,21 +36,10 @@ int ConcurrentGCThread::_CGC_flag = CGC_nil; -SuspendibleThreadSet ConcurrentGCThread::_sts; - ConcurrentGCThread::ConcurrentGCThread() : _should_terminate(false), _has_terminated(false) { - _sts.initialize(); }; -void ConcurrentGCThread::safepoint_synchronize() { - _sts.suspend_all(); -} - -void ConcurrentGCThread::safepoint_desynchronize() { - _sts.resume_all(); -} - void ConcurrentGCThread::create_and_start() { if (os::create_thread(this, os::cgc_thread)) { // XXX: need to set this to low priority @@ -91,78 +80,6 @@ ThreadLocalStorage::set_thread(NULL); } - -void SuspendibleThreadSet::initialize_work() { - MutexLocker x(STS_init_lock); - if (!_initialized) { - _m = new Monitor(Mutex::leaf, - "SuspendibleThreadSetLock", true); - _async = 0; - _async_stop = false; - _async_stopped = 0; - _initialized = true; - } -} - -void SuspendibleThreadSet::join() { - initialize(); - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag); - _async++; - assert(_async > 0, "Huh."); -} - -void SuspendibleThreadSet::leave() { - assert(_initialized, "Must be initialized."); - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - _async--; - assert(_async >= 0, "Huh."); - if (_async_stop) _m->notify_all(); -} - -void SuspendibleThreadSet::yield(const char* id) { - assert(_initialized, "Must be initialized."); - if (_async_stop) { - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - if (_async_stop) { - _async_stopped++; - assert(_async_stopped > 0, "Huh."); - if (_async_stopped == _async) { - if (ConcGCYieldTimeout > 0) { - double now = os::elapsedTime(); - guarantee((now - _suspend_all_start) * 1000.0 < - (double)ConcGCYieldTimeout, - "Long delay; whodunit?"); - } - } - _m->notify_all(); - while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag); - _async_stopped--; - assert(_async >= 0, "Huh"); - _m->notify_all(); - } - } -} - -void SuspendibleThreadSet::suspend_all() { - initialize(); // If necessary. 
- if (ConcGCYieldTimeout > 0) { - _suspend_all_start = os::elapsedTime(); - } - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - assert(!_async_stop, "Only one at a time."); - _async_stop = true; - while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag); -} - -void SuspendibleThreadSet::resume_all() { - assert(_initialized, "Must be initialized."); - MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag); - assert(_async_stopped == _async, "Huh."); - _async_stop = false; - _m->notify_all(); -} - static void _sltLoop(JavaThread* thread, TRAPS) { SurrogateLockerThread* slt = (SurrogateLockerThread*)thread; slt->loop(); @@ -220,6 +137,13 @@ return res; } +void SurrogateLockerThread::report_missing_slt() { + vm_exit_during_initialization( + "GC before GC support fully initialized: " + "SLT is needed but has not yet been created."); + ShouldNotReachHere(); +} + void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) { MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); assert(_buffer == empty, "Should be empty"); @@ -282,30 +206,3 @@ } assert(!_monitor.owned_by_self(), "Should unlock before exit."); } - - -// ===== STS Access From Outside CGCT ===== - -void ConcurrentGCThread::stsYield(const char* id) { - assert( Thread::current()->is_ConcurrentGC_thread(), - "only a conc GC thread can call this" ); - _sts.yield(id); -} - -bool ConcurrentGCThread::stsShouldYield() { - assert( Thread::current()->is_ConcurrentGC_thread(), - "only a conc GC thread can call this" ); - return _sts.should_yield(); -} - -void ConcurrentGCThread::stsJoin() { - assert( Thread::current()->is_ConcurrentGC_thread(), - "only a conc GC thread can call this" ); - _sts.join(); -} - -void ConcurrentGCThread::stsLeave() { - assert( Thread::current()->is_ConcurrentGC_thread(), - "only a conc GC thread can call this" ); - _sts.leave(); -} --- ./hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,55 +26,8 @@ #define SHARE_VM_GC_IMPLEMENTATION_SHARED_CONCURRENTGCTHREAD_HPP #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS +#include "gc_implementation/shared/suspendibleThreadSet.hpp" #include "runtime/thread.hpp" -#endif // INCLUDE_ALL_GCS - -class VoidClosure; - -// A SuspendibleThreadSet is (obviously) a set of threads that can be -// suspended. A thread can join and later leave the set, and periodically -// yield. If some thread (not in the set) requests, via suspend_all, that -// the threads be suspended, then the requesting thread is blocked until -// all the threads in the set have yielded or left the set. (Threads may -// not enter the set when an attempted suspension is in progress.) The -// suspending thread later calls resume_all, allowing the suspended threads -// to continue. - -class SuspendibleThreadSet { - Monitor* _m; - int _async; - bool _async_stop; - int _async_stopped; - bool _initialized; - double _suspend_all_start; - - void initialize_work(); - - public: - SuspendibleThreadSet() : _initialized(false) {} - - // Add the current thread to the set. May block if a suspension - // is in progress. - void join(); - // Removes the current thread from the set. - void leave(); - // Returns "true" iff an suspension is in progress. - bool should_yield() { return _async_stop; } - // Suspends the current thread if a suspension is in progress (for - // the duration of the suspension.) 
- void yield(const char* id); - // Return when all threads in the set are suspended. - void suspend_all(); - // Allow suspended threads to resume. - void resume_all(); - // Redundant initializations okay. - void initialize() { - // Double-check dirty read idiom. - if (!_initialized) initialize_work(); - } -}; - class ConcurrentGCThread: public NamedThread { friend class VMStructs; @@ -96,9 +49,6 @@ static int set_CGC_flag(int b) { return _CGC_flag |= b; } static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; } - // All instances share this one set. - static SuspendibleThreadSet _sts; - // Create and start the thread (setting it's priority high.) void create_and_start(); @@ -121,25 +71,6 @@ // Tester bool is_ConcurrentGC_thread() const { return true; } - - static void safepoint_synchronize(); - static void safepoint_desynchronize(); - - // All overridings should probably do _sts::yield, but we allow - // overriding for distinguished debugging messages. Default is to do - // nothing. - virtual void yield() {} - - bool should_yield() { return _sts.should_yield(); } - - // they are prefixed by sts since there are already yield() and - // should_yield() (non-static) methods in this class and it was an - // easy way to differentiate them. - static void stsYield(const char* id); - static bool stsShouldYield(); - static void stsJoin(); - static void stsLeave(); - }; // The SurrogateLockerThread is used by concurrent GC threads for @@ -162,6 +93,9 @@ public: static SurrogateLockerThread* make(TRAPS); + // Terminate VM with error message that SLT needed but not yet created. + static void report_missing_slt(); + SurrogateLockerThread(); bool is_hidden_from_external_view() const { return true; } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcId.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcId.hpp" +#include "runtime/safepoint.hpp" + +uint GCId::_next_id = 0; + +const GCId GCId::create() { + return GCId(_next_id++); +} +const GCId GCId::peek() { + return GCId(_next_id); +} +const GCId GCId::undefined() { + return GCId(UNDEFINED); +} +bool GCId::is_undefined() const { + return _id == UNDEFINED; +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcId.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP + +#include "memory/allocation.hpp" + +class GCId VALUE_OBJ_CLASS_SPEC { + private: + uint _id; + GCId(uint id) : _id(id) {} + GCId() { } // Unused + + static uint _next_id; + static const uint UNDEFINED = (uint)-1; + + public: + uint id() const { + assert(_id != UNDEFINED, "Using undefined GC ID"); + return _id; + } + bool is_undefined() const; + + static const GCId create(); + static const GCId peek(); + static const GCId undefined(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP --- ./hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/objectCountEventSender.hpp" @@ -38,19 +39,14 @@ #include "gc_implementation/g1/evacuationInfo.hpp" #endif -#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?") -#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?") - -static GCId GCTracer_next_gc_id = 0; -static GCId create_new_gc_id() { - return GCTracer_next_gc_id++; -} +#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?") +#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?") void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) { assert_unset_gc_id(); - GCId gc_id = create_new_gc_id(); - _shared_gc_info.set_id(gc_id); + GCId gc_id = GCId::create(); + _shared_gc_info.set_gc_id(gc_id); _shared_gc_info.set_cause(cause); _shared_gc_info.set_start_timestamp(timestamp); } @@ -62,7 +58,7 @@ } bool GCTracer::has_reported_gc_start() const { - return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID; + return !_shared_gc_info.gc_id().is_undefined(); } void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { @@ -81,7 +77,7 @@ report_gc_end_impl(timestamp, time_partitions); - _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID); + _shared_gc_info.set_gc_id(GCId::undefined()); } void 
GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const { @@ -132,7 +128,7 @@ if (!cit.allocation_failed()) { HeapInspection hi(false, false, false, NULL); hi.populate_table(&cit, is_alive_cl); - ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), Ticks::now()); + ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now()); cit.iterate(&event_sender); } } --- ./hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,6 +27,7 @@ #include "gc_interface/gcCause.hpp" #include "gc_interface/gcName.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/allocation.hpp" @@ -38,7 +39,6 @@ #include "utilities/macros.hpp" #include "utilities/ticks.hpp" -typedef uint GCId; class EvacuationInfo; class GCHeapSummary; @@ -50,11 +50,8 @@ class BoolObjectClosure; class SharedGCInfo VALUE_OBJ_CLASS_SPEC { - public: - static const GCId UNSET_GCID = (GCId)-1; - private: - GCId _id; + GCId _gc_id; GCName _name; GCCause::Cause _cause; Ticks _start_timestamp; @@ -64,7 +61,7 @@ public: SharedGCInfo(GCName name) : - _id(UNSET_GCID), + _gc_id(GCId::undefined()), _name(name), _cause(GCCause::_last_gc_cause), _start_timestamp(), @@ -73,8 +70,8 @@ _longest_pause() { } - void set_id(GCId id) { _id = id; } - GCId id() const { return _id; } + void set_gc_id(GCId gc_id) { _gc_id = gc_id; } + const GCId& gc_id() const { return _gc_id; } void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; } const Ticks start_timestamp() const { return _start_timestamp; } @@ -131,10 +128,11 @@ void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; bool has_reported_gc_start() const; + const GCId& gc_id() { return _shared_gc_info.gc_id(); } protected: GCTracer(GCName name) : _shared_gc_info(name) {} - virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); + void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions); private: --- ./hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -43,7 +43,7 @@ void GCTracer::send_garbage_collection_event() const { EventGCGarbageCollection event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_shared_gc_info.id()); + event.set_gcId(_shared_gc_info.gc_id().id()); event.set_name(_shared_gc_info.name()); event.set_cause((u2) _shared_gc_info.cause()); event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); @@ -57,7 +57,7 @@ void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { EventGCReferenceStatistics e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_type((u1)type); e.set_count(count); e.commit(); @@ -68,7 +68,7 @@ const MetaspaceChunkFreeListSummary& summary) const { EventMetaspaceChunkFreeListSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_when(when); 
e.set_metadataType(mdtype); @@ -91,7 +91,7 @@ void ParallelOldTracer::send_parallel_old_event() const { EventGCParallelOld e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -102,7 +102,7 @@ void YoungGCTracer::send_young_gc_event() const { EventGCYoungGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_tenuringThreshold(_tenuring_threshold); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -113,7 +113,7 @@ void OldGCTracer::send_old_gc_event() const { EventGCOldGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); @@ -132,7 +132,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { EventPromotionFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_data(to_trace_struct(pf_info)); e.set_thread(pf_info.thread()->thread_id()); e.commit(); @@ -143,7 +143,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() { EventConcurrentModeFailure e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.commit(); } } @@ -152,7 +152,7 @@ void G1NewTracer::send_g1_young_gc_event() { EventGCG1GarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_type(_g1_young_gc_info.type()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -163,7 +163,7 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { EventEvacuationInfo e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_cSetRegions(info->collectionset_regions()); e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedAfter(info->collectionset_used_after()); @@ -179,7 +179,7 @@ void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { EventEvacuationFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_data(to_trace_struct(ef_info)); e.commit(); } @@ -206,17 +206,17 @@ } class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { - GCId _id; + GCId _gc_id; GCWhen::Type _when; public: - GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {} + GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} void visit(const GCHeapSummary* heap_summary) const { const VirtualSpaceSummary& heap_space = heap_summary->heap(); EventGCHeapSummary e; if (e.should_commit()) { - e.set_gcId(_id); + e.set_gcId(_gc_id.id()); e.set_when((u1)_when); e.set_heapSpace(to_trace_struct(heap_space)); e.set_heapUsed(heap_summary->used()); @@ -236,7 +236,7 @@ EventPSHeapSummary e; if (e.should_commit()) { - e.set_gcId(_id); + e.set_gcId(_gc_id.id()); e.set_when((u1)_when); e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); @@ -251,7 +251,7 @@ }; void 
GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { - GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when); + GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when); heap_summary.accept(&visitor); } @@ -268,7 +268,7 @@ void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { EventMetaspaceSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_when((u1) when); e.set_gcThreshold(meta_space_summary.capacity_until_GC()); e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); @@ -287,7 +287,7 @@ void send_phase(PausePhase* pause) { T event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_gc_id); + event.set_gcId(_gc_id.id()); event.set_name(pause->name()); event.set_starttime(pause->start()); event.set_endtime(pause->end()); @@ -311,7 +311,7 @@ }; void GCTracer::send_phase_events(TimePartitions* time_partitions) const { - PhaseSender phase_reporter(_shared_gc_info.id()); + PhaseSender phase_reporter(_shared_gc_info.gc_id()); TimePartitionPhasesIterator iter(time_partitions); while (iter.has_next()) { --- ./hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" @@ -34,7 +35,7 @@ #include "utilities/ticks.inline.hpp" -GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) : +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() { if (_doit || _timer != NULL) { _start_counter.stamp(); @@ -52,6 +53,9 @@ gclog_or_tty->stamp(); gclog_or_tty->print(": "); } + if (PrintGCID) { + gclog_or_tty->print("#%u: ", gc_id.id()); + } gclog_or_tty->print("[%s", title); gclog_or_tty->flush(); } --- ./hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP +#include "gc_implementation/shared/gcTrace.hpp" #include "prims/jni_md.h" #include "utilities/ticks.hpp" @@ -38,7 +39,7 @@ Ticks _start_counter; public: - GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer); + GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id); ~GCTraceTime(); }; --- ./hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -49,27 +49,19 @@ SerialOldTracer* MarkSweep::_gc_tracer = NULL; MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; -CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true); void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); } void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); } MarkSweep::MarkAndPushClosure 
MarkSweep::mark_and_push_closure; -MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure; -MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure; +CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure); +CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure); void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); } void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); } -void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&MarkSweep::mark_and_push_closure); -} -void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&MarkSweep::adjust_pointer_closure); -} - void MarkSweep::follow_class_loader(ClassLoaderData* cld) { - cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true); + MarkSweep::follow_cld_closure.do_cld(cld); } void MarkSweep::follow_stack() { --- ./hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -65,17 +65,6 @@ virtual void do_oop(narrowOop* p); }; - // The one and only place to start following the classes. - // Should only be applied to the ClassLoaderData klasses list. - class FollowKlassClosure : public KlassClosure { - public: - void do_klass(Klass* klass); - }; - class AdjustKlassClosure : public KlassClosure { - public: - void do_klass(Klass* klass); - }; - class FollowStackClosure: public VoidClosure { public: virtual void do_void(); @@ -143,12 +132,11 @@ // Public closures static IsAliveClosure is_alive; static FollowRootClosure follow_root_closure; - static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure static MarkAndPushClosure mark_and_push_closure; - static FollowKlassClosure follow_klass_closure; static FollowStackClosure follow_stack_closure; + static CLDToOopClosure follow_cld_closure; static AdjustPointerClosure adjust_pointer_closure; - static AdjustKlassClosure adjust_klass_closure; + static CLDToOopClosure adjust_cld_closure; // Accessors static uint total_invocations() { return _total_invocations; } --- ./hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,12 +24,13 @@ #include "precompiled.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/objectCountEventSender.hpp" #include "memory/heapInspection.hpp" #include "trace/tracing.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" #include "utilities/ticks.hpp" - #if INCLUDE_SERVICES void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { @@ -38,7 +39,7 @@ "Only call this method if the event is enabled"); EventObjectCountAfterGC event(UNTIMED); - event.set_gcId(gc_id); + event.set_gcId(gc_id.id()); event.set_class(entry->klass()); event.set_count(entry->count()); event.set_totalSize(entry->words() * BytesPerWord); --- ./hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,7 +24,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP - +#include "gc_interface/collectedHeap.hpp" 
#include "memory/allocation.hpp" #include "memory/blockOffsetTable.hpp" #include "memory/threadLocalAllocBuffer.hpp" @@ -60,6 +60,7 @@ // Initializes the buffer to be empty, but with the given "word_sz". // Must get initialized with "set_buf" for an allocation to succeed. ParGCAllocBuffer(size_t word_sz); + virtual ~ParGCAllocBuffer() {} static const size_t min_size() { // Make sure that we return something that is larger than AlignmentReserve @@ -84,6 +85,9 @@ } } + // Allocate the object aligned to "alignment_in_bytes". + HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes); + // Undo the last allocation in the buffer, which is required to be of the // "obj" of the given "word_sz". void undo_allocation(HeapWord* obj, size_t word_sz) { @@ -114,7 +118,7 @@ } // Sets the space of the buffer to be [buf, space+word_sz()). - void set_buf(HeapWord* buf) { + virtual void set_buf(HeapWord* buf) { _bottom = buf; _top = _bottom; _hard_end = _bottom + word_sz(); @@ -159,7 +163,7 @@ // Fills in the unallocated portion of the buffer with a garbage object. // If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain" // is true, attempt to re-use the unused portion in the next GC. - void retire(bool end_of_gc, bool retain); + virtual void retire(bool end_of_gc, bool retain); void print() PRODUCT_RETURN; }; @@ -239,14 +243,14 @@ void undo_allocation(HeapWord* obj, size_t word_sz); - void set_buf(HeapWord* buf_start) { + virtual void set_buf(HeapWord* buf_start) { ParGCAllocBuffer::set_buf(buf_start); _true_end = _hard_end; _bt.set_region(MemRegion(buf_start, word_sz())); _bt.initialize_threshold(); } - void retire(bool end_of_gc, bool retain); + virtual void retire(bool end_of_gc, bool retain); MemRegion range() { return MemRegion(_top, _true_end); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP + +#include "gc_implementation/shared/parGCAllocBuffer.hpp" +#include "gc_interface/collectedHeap.inline.hpp" + +HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) { + + HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes); + if (res == NULL) { + return NULL; + } + + // Set _top so that allocate(), which expects _top to be correctly set, + // can be used below. + _top = res; + return allocate(word_sz); +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/shared/suspendibleThreadSet.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/suspendibleThreadSet.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/thread.inline.hpp" + +uint SuspendibleThreadSet::_nthreads = 0; +uint SuspendibleThreadSet::_nthreads_stopped = 0; +bool SuspendibleThreadSet::_suspend_all = false; +double SuspendibleThreadSet::_suspend_all_start = 0.0; + +void SuspendibleThreadSet::join() { + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + while (_suspend_all) { + ml.wait(Mutex::_no_safepoint_check_flag); + } + _nthreads++; +} + +void SuspendibleThreadSet::leave() { + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + assert(_nthreads > 0, "Invalid"); + _nthreads--; + if (_suspend_all) { + ml.notify_all(); + } +} + +void SuspendibleThreadSet::yield() { + if (_suspend_all) { + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + if (_suspend_all) { + _nthreads_stopped++; + if (_nthreads_stopped == _nthreads) { + if (ConcGCYieldTimeout > 0) { + double now = os::elapsedTime(); + guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay"); + } + } + ml.notify_all(); + while (_suspend_all) { + ml.wait(Mutex::_no_safepoint_check_flag); + } + assert(_nthreads_stopped > 0, "Invalid"); + _nthreads_stopped--; + ml.notify_all(); + } + } +} + +void SuspendibleThreadSet::synchronize() { + assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); + if (ConcGCYieldTimeout > 0) { + _suspend_all_start = os::elapsedTime(); + } + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + assert(!_suspend_all, "Only one at a time"); + _suspend_all = true; + while (_nthreads_stopped < _nthreads) { + ml.wait(Mutex::_no_safepoint_check_flag); + } +} + +void SuspendibleThreadSet::desynchronize() { + assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + assert(_nthreads_stopped == _nthreads, "Invalid"); + _suspend_all = false; + ml.notify_all(); +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/gc_implementation/shared/suspendibleThreadSet.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP + +#include "memory/allocation.hpp" + +// A SuspendibleThreadSet is a set of threads that can be suspended. 
+// A thread can join and later leave the set, and periodically yield.
+// If some thread (not in the set) requests, via synchronize(), that
+// the threads be suspended, then the requesting thread is blocked
+// until all the threads in the set have yielded or left the set. Threads
+// may not enter the set when an attempted suspension is in progress. The
+// suspending thread later calls desynchronize(), allowing the suspended
+// threads to continue.
+class SuspendibleThreadSet : public AllStatic {
+private:
+  static uint   _nthreads;
+  static uint   _nthreads_stopped;
+  static bool   _suspend_all;
+  static double _suspend_all_start;
+
+public:
+  // Adds the current thread to the set. May block if a suspension is in progress.
+  static void join();
+
+  // Removes the current thread from the set.
+  static void leave();
+
+  // Returns true if a suspension is in progress.
+  static bool should_yield() { return _suspend_all; }
+
+  // Suspends the current thread if a suspension is in progress.
+  static void yield();
+
+  // Returns when all threads in the set are suspended.
+  static void synchronize();
+
+  // Resumes all suspended threads in the set.
+  static void desynchronize();
+};
+
+class SuspendibleThreadSetJoiner : public StackObj {
+public:
+  SuspendibleThreadSetJoiner() {
+    SuspendibleThreadSet::join();
+  }
+
+  ~SuspendibleThreadSetJoiner() {
+    SuspendibleThreadSet::leave();
+  }
+
+  bool should_yield() {
+    return SuspendibleThreadSet::should_yield();
+  }
+
+  void yield() {
+    SuspendibleThreadSet::yield();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
--- ./hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -209,6 +209,45 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+// Returns true iff concurrent GCs unload metadata.
+bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
+#if INCLUDE_ALL_GCS
+  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
+    MetaspaceGC::set_should_concurrent_collect(true);
+    return true;
+  }
+
+  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    g1h->g1_policy()->set_initiate_conc_mark_if_possible();
+
+    GCCauseSetter x(g1h, _gc_cause);
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+    if (should_start) {
+      double pause_target = g1h->g1_policy()->max_pause_time_ms();
+      g1h->do_collection_pause_at_safepoint(pause_target);
+    }
+    return true;
+  }
+#endif
+
+  return false;
+}
+
+static void log_metaspace_alloc_failure_for_concurrent_GC() {
+  if (Verbose && PrintGCDetails) {
+    if (UseConcMarkSweepGC) {
+      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+    } else if (UseG1GC) {
+      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
+    }
+  }
+}
+
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
@@ -220,54 +259,57 @@
   // a GC that freed space for the allocation.
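
Taken together, join()/leave()/yield() and the synchronize()/desynchronize() pair form a cooperative suspension protocol, and SuspendibleThreadSetJoiner packages the join/leave half as RAII. A minimal sketch of how a concurrent GC worker would participate, using the classes introduced above (the loop and its two work helpers are hypothetical stand-ins, not part of this patch):

    // Hypothetical stand-ins for real GC work:
    static bool has_more_work();
    static void process_one_chunk();

    void concurrent_work_loop() {
      SuspendibleThreadSetJoiner sts;  // join() now, leave() when the scope exits
      while (has_more_work()) {
        process_one_chunk();
        if (sts.should_yield()) {
          sts.yield();                 // parks until the VM thread calls desynchronize()
        }
      }
    }
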
if (!MetadataAllocationFailALot) { _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); - } - - if (_result == NULL) { - if (UseConcMarkSweepGC) { - if (CMSClassUnloadingEnabled) { - MetaspaceGC::set_should_concurrent_collect(true); - } - // For CMS expand since the collection is going to be concurrent. - _result = - _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); - } - if (_result == NULL) { - // Don't clear the soft refs yet. - if (Verbose && PrintGCDetails && UseConcMarkSweepGC) { - gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); - } - heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); - // After a GC try to allocate without expanding. Could fail - // and expansion will be tried below. - _result = - _loader_data->metaspace_non_null()->allocate(_size, _mdtype); - } - if (_result == NULL) { - // If still failing, allow the Metaspace to expand. - // See delta_capacity_until_GC() for explanation of the - // amount of the expansion. - // This should work unless there really is no more space - // or a MaxMetaspaceSize has been specified on the command line. - _result = - _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); - if (_result == NULL) { - // If expansion failed, do a last-ditch collection and try allocating - // again. A last-ditch collection will clear softrefs. This - // behavior is similar to the last-ditch collection done for perm - // gen when it was full and a collection for failed allocation - // did not free perm gen space. - heap->collect_as_vm_thread(GCCause::_last_ditch_collection); - _result = - _loader_data->metaspace_non_null()->allocate(_size, _mdtype); - } - } - if (Verbose && PrintGCDetails && _result == NULL) { - gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size " - SIZE_FORMAT, _size); + if (_result != NULL) { + return; } } - if (_result == NULL && GC_locker::is_active_and_needs_gc()) { + if (initiate_concurrent_GC()) { + // For CMS and G1 expand since the collection is going to be concurrent. + _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); + if (_result != NULL) { + return; + } + + log_metaspace_alloc_failure_for_concurrent_GC(); + } + + // Don't clear the soft refs yet. + heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); + // After a GC try to allocate without expanding. Could fail + // and expansion will be tried below. + _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + if (_result != NULL) { + return; + } + + // If still failing, allow the Metaspace to expand. + // See delta_capacity_until_GC() for explanation of the + // amount of the expansion. + // This should work unless there really is no more space + // or a MaxMetaspaceSize has been specified on the command line. + _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); + if (_result != NULL) { + return; + } + + // If expansion failed, do a last-ditch collection and try allocating + // again. A last-ditch collection will clear softrefs. This + // behavior is similar to the last-ditch collection done for perm + // gen when it was full and a collection for failed allocation + // did not free perm gen space. 
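
With the early returns above, the rewritten doit() reads as a fixed escalation ladder. Condensed to its shape (the helper names below are hypothetical stand-ins for the _loader_data->metaspace_non_null() and collect_as_vm_thread() calls, not real HotSpot signatures):

    typedef void MetaWord;  // stand-in for HotSpot's MetaWord

    static MetaWord* try_allocate();                 // stand-in: allocate, no expansion
    static MetaWord* expand_and_allocate();          // stand-in: expand, then allocate
    static bool      start_concurrent_collection();  // stand-in: CMS/G1 unloading cycle
    static void      threshold_gc();                 // stand-in: GC that keeps soft refs
    static void      last_ditch_gc();                // stand-in: GC that clears soft refs

    static MetaWord* allocate_with_escalation() {
      if (MetaWord* r = try_allocate())          return r;  // fast path, no GC
      if (start_concurrent_collection()) {                  // collection is concurrent,
        if (MetaWord* r = expand_and_allocate()) return r;  // so expand instead of waiting
      }
      threshold_gc();
      if (MetaWord* r = try_allocate())          return r;  // retry without expanding
      if (MetaWord* r = expand_and_allocate())   return r;  // then allow expansion
      last_ditch_gc();
      return try_allocate();                                // may still be NULL
    }
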
+ heap->collect_as_vm_thread(GCCause::_last_ditch_collection); + _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + if (_result != NULL) { + return; + } + + if (Verbose && PrintGCDetails) { + gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size " + SIZE_FORMAT, _size); + } + + if (GC_locker::is_active_and_needs_gc()) { set_gc_locked(); } } --- ./hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -217,6 +217,8 @@ virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; } virtual void doit(); MetaWord* result() const { return _result; } + + bool initiate_concurrent_GC(); }; class SvcGCMarker : public StackObj { --- ./hotspot/src/share/vm/gc_interface/collectedHeap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_interface/collectedHeap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -558,13 +558,13 @@ void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { if (HeapDumpBeforeFullGC) { - GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer); + GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create()); // We are doing a "major" collection and a heap dump before // major collection has been requested. HeapDumper::dump_heap(); } if (PrintClassHistogramBeforeFullGC) { - GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer); + GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create()); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } @@ -572,11 +572,11 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) { if (HeapDumpAfterFullGC) { - GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer); + GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create()); HeapDumper::dump_heap(); } if (PrintClassHistogramAfterFullGC) { - GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer); + GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create()); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } --- ./hotspot/src/share/vm/gc_interface/collectedHeap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_interface/collectedHeap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -351,6 +351,12 @@ fill_with_object(start, pointer_delta(end, start), zap); } + // Return the address "addr" aligned by "alignment_in_bytes" if such + // an address is below "end". Return NULL otherwise. + inline static HeapWord* align_allocation_or_fail(HeapWord* addr, + HeapWord* end, + unsigned short alignment_in_bytes); + // Some heaps may offer a contiguous region for shared non-blocking // allocation, via inlined code (by exporting the address of the top and // end fields defining the extent of the contiguous allocation region.) @@ -389,15 +395,6 @@ // allocation from them and necessitating allocation of new TLABs. virtual void ensure_parsability(bool retire_tlabs); - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection or expansion activity. In a - // generational collector, for example, this is probably the largest - // allocation that could be supported (without expansion) in the youngest - // generation. 
It is "unsafe" because no locks are taken; the result - // should be treated as an approximation, not a guarantee, for use in - // heuristic resizing decisions. - virtual size_t unsafe_max_alloc() = 0; - // Section on thread-local allocation buffers (TLABs) // If the heap supports thread-local allocation buffers, it should override // the following methods: @@ -640,6 +637,18 @@ // actual number may be germane. static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; } + // Copy the current allocation context statistics for the specified contexts. + // For each context in contexts, set the corresponding entries in the totals + // and accuracy arrays to the current values held by the statistics. Each + // array should be of length len. + // Returns true if there are more stats available. + virtual bool copy_allocation_context_stats(const jint* contexts, + jlong* totals, + jbyte* accuracy, + jint len) { + return false; + } + /////////////// Unit tests /////////////// NOT_PRODUCT(static void test_is_in();) --- ./hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -241,6 +241,44 @@ oop_iterate(&no_header_cl); } + +inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr, + HeapWord* end, + unsigned short alignment_in_bytes) { + if (alignment_in_bytes <= ObjectAlignmentInBytes) { + return addr; + } + + assert(is_ptr_aligned(addr, HeapWordSize), + err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr))); + assert(is_size_aligned(alignment_in_bytes, HeapWordSize), + err_msg("Alignment size %u is incorrect.", alignment_in_bytes)); + + HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes); + size_t padding = pointer_delta(new_addr, addr); + + if (padding == 0) { + return addr; + } + + if (padding < CollectedHeap::min_fill_size()) { + padding += alignment_in_bytes / HeapWordSize; + assert(padding >= CollectedHeap::min_fill_size(), + err_msg("alignment_in_bytes %u is expect to be larger " + "than the minimum object size", alignment_in_bytes)); + new_addr = addr + padding; + } + + assert(new_addr > addr, err_msg("Unexpected arithmetic overflow " + PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr))); + if(new_addr < end) { + CollectedHeap::fill_with_object(addr, padding); + return new_addr; + } else { + return NULL; + } +} + #ifndef PRODUCT inline bool --- ./hotspot/src/share/vm/gc_interface/gcCause.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_interface/gcCause.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -51,6 +51,13 @@ case _heap_dump: return "Heap Dump Initiated GC"; + case _wb_young_gc: + return "WhiteBox Initiated Young GC"; + + case _update_allocation_context_stats_inc: + case _update_allocation_context_stats_full: + return "Update Allocation Context Stats"; + case _no_gc: return "No GC"; --- ./hotspot/src/share/vm/gc_interface/gcCause.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/gc_interface/gcCause.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -46,6 +46,9 @@ _gc_locker, _heap_inspection, _heap_dump, + _wb_young_gc, + _update_allocation_context_stats_inc, + _update_allocation_context_stats_full, /* implementation independent, but reserved for GC use */ _no_gc, --- ./hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 
@@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,43 +41,10 @@ #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/threadCritical.hpp" #include "utilities/exceptions.hpp" -#ifdef TARGET_OS_ARCH_linux_x86 -# include "orderAccess_linux_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_sparc -# include "orderAccess_linux_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_zero -# include "orderAccess_linux_zero.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_x86 -# include "orderAccess_solaris_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_sparc -# include "orderAccess_solaris_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_windows_x86 -# include "orderAccess_windows_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_arm -# include "orderAccess_linux_arm.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_ppc -# include "orderAccess_linux_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_aix_ppc -# include "orderAccess_aix_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_x86 -# include "orderAccess_bsd_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_zero -# include "orderAccess_bsd_zero.inline.hpp" -#endif - // no precompiled headers #ifdef CC_INTERP @@ -2846,11 +2813,11 @@ if (TraceExceptions) { ttyLocker ttyl; ResourceMark rm; - tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); + tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, - istate->bcp() - (intptr_t)METHOD->code_base(), - continuation_bci, THREAD); + (int)(istate->bcp() - METHOD->code_base()), + (int)continuation_bci, p2i(THREAD)); } // for AbortVMOnException flag NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); @@ -2862,11 +2829,11 @@ if (TraceExceptions) { ttyLocker ttyl; ResourceMark rm; - tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); + tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, - istate->bcp() - (intptr_t)METHOD->code_base(), - THREAD); + (int)(istate->bcp() - METHOD->code_base()), + p2i(THREAD)); } // for AbortVMOnException flag NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); @@ -3465,7 +3432,7 @@ tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf); tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry); tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link); - tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp); + tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp); tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base); tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit); 
tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base); --- ./hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,11 +86,11 @@ " \t-> " PTR_FORMAT "(%d)", \ (int) THREAD->osthread()->thread_id(), \ BCI(), \ - MDX(), \ + p2i(MDX()), \ (MDX() == NULL \ ? 0 \ : istate->method()->method_data()->dp_to_di((address)MDX())), \ - mdx, \ + p2i(mdx), \ istate->method()->method_data()->dp_to_di((address)mdx) \ ); \ }; \ @@ -107,7 +107,7 @@ MethodData *md = istate->method()->method_data(); \ tty->cr(); \ tty->print("method data at mdx " PTR_FORMAT "(0) for", \ - md->data_layout_at(md->bci_to_di(0))); \ + p2i(md->data_layout_at(md->bci_to_di(0)))); \ istate->method()->print_short_name(tty); \ tty->cr(); \ if (md != NULL) { \ @@ -115,7 +115,7 @@ address mdx = (address) MDX(); \ if (mdx != NULL) { \ tty->print_cr("current mdx " PTR_FORMAT "(%d)", \ - mdx, \ + p2i(mdx), \ istate->method()->method_data()->dp_to_di(mdx)); \ } \ } else { \ --- ./hotspot/src/share/vm/interpreter/bytecodes.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/bytecodes.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -420,8 +420,10 @@ static bool is_astore (Code code) { return (code == _astore || code == _astore_0 || code == _astore_1 || code == _astore_2 || code == _astore_3); } + static bool is_const (Code code) { return (_aconst_null <= code && code <= _ldc2_w); } static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0 || code == _fconst_0 || code == _dconst_0); } + static bool is_return (Code code) { return (_ireturn <= code && code <= _return); } static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); } static bool has_receiver (Code code) { assert(is_invoke(code), ""); return code == _invokevirtual || code == _invokespecial || --- ./hotspot/src/share/vm/interpreter/interpreter.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/interpreter.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -53,7 +53,9 @@ public: // Initialization/finalization void initialize(int size, - CodeStrings& strings) { _size = size; DEBUG_ONLY(_strings.assign(strings);) } + CodeStrings& strings) { _size = size; + DEBUG_ONLY(::new(&_strings) CodeStrings();) + DEBUG_ONLY(_strings.assign(strings);) } void finalize() { ShouldNotCallThis(); } // General info/converters --- ./hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -398,6 +398,18 @@ int handler_bci; int current_bci = bci(thread); + if (thread->frames_to_pop_failed_realloc() > 0) { + // Allocation of scalar replaced object used in this frame + // failed. Unconditionally pop the frame. 
+ thread->dec_frames_to_pop_failed_realloc(); + thread->set_vm_result(h_exception()); + // If the method is synchronized we already unlocked the monitor + // during deoptimization so the interpreter needs to skip it when + // the frame is popped. + thread->set_do_not_unlock_if_synchronized(true); + return Interpreter::remove_activation_entry(); + } + // Need to do this check first since when _do_not_unlock_if_synchronized // is set, we don't want to trigger any classloading which may make calls // into java, or surprisingly find a matching exception handler for bci 0 @@ -1269,8 +1281,10 @@ // This is a support of the JVMTI PopFrame interface. // Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument // and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters. -// The dmh argument is a reference to a DirectMethoHandle that has a member name field. -IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh, +// The member_name argument is a saved reference (in local#0) to the member_name. +// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle. +// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated. +IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name, Method* method, address bcp)) Bytecodes::Code code = Bytecodes::code_at(method, bcp); if (code != Bytecodes::_invokestatic) { @@ -1282,8 +1296,12 @@ Symbol* mname = cpool->name_ref_at(cp_index); if (MethodHandles::has_member_arg(cname, mname)) { - oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh); - thread->set_vm_result(member_name); + oop member_name_oop = (oop) member_name; + if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) { + // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated. + member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop); + } + thread->set_vm_result(member_name_oop); } IRT_END #endif // INCLUDE_JVMTI --- ./hotspot/src/share/vm/interpreter/oopMapCache.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/oopMapCache.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -180,7 +180,7 @@ } } -bool InterpreterOopMap::is_empty() { +bool InterpreterOopMap::is_empty() const { bool result = _method == NULL; assert(_method != NULL || (_bci == 0 && (_mask_size == 0 || _mask_size == USHRT_MAX) && @@ -196,7 +196,7 @@ for (int i = 0; i < N; i++) _bit_mask[i] = 0; } -void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) { +void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const { int n = number_of_entries(); int word_index = 0; uintptr_t value = 0; @@ -238,7 +238,7 @@ #endif -void InterpreterOopMap::print() { +void InterpreterOopMap::print() const { int n = number_of_entries(); tty->print("oop map for "); method()->print_value(); @@ -469,7 +469,7 @@ } } -inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) { +inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const { // We use method->code_size() rather than method->identity_hash() below since // the mark may not be present if a pointer to the method is already reversed. 
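
The member_name_arg_or_null() change earlier in this hunk boils down to "unwrap only when handed the old shape": newer generated bytecode passes the MemberName directly, while older JDK 7/8 code shapes still pass a DirectMethodHandle. Its core, isolated as a sketch (these are the HotSpot accessors used by the patch, shown out of context; this fragment is not compilable standalone):

    // Accept either shape during the transition; see the FIXME in the patch.
    oop resolved_member_name(oop member_name_or_dmh) {
      if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_or_dmh)) {
        // Old shape: dig the MemberName out of the DirectMethodHandle.
        return java_lang_invoke_DirectMethodHandle::member(member_name_or_dmh);
      }
      return member_name_or_dmh;  // already a MemberName
    }
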
return ((unsigned int) bci) @@ -522,7 +522,7 @@ void OopMapCache::lookup(methodHandle method, int bci, - InterpreterOopMap* entry_for) { + InterpreterOopMap* entry_for) const { MutexLocker x(&_mut); OopMapCacheEntry* entry = NULL; --- ./hotspot/src/share/vm/interpreter/oopMapCache.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/interpreter/oopMapCache.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -101,32 +101,31 @@ // access methods Method* method() const { return _method; } - void set_method(Method* v) { _method = v; } + void set_method(Method* v) { _method = v; } int bci() const { return _bci; } void set_bci(int v) { _bci = v; } int mask_size() const { return _mask_size; } void set_mask_size(int v) { _mask_size = v; } - int number_of_entries() const { return mask_size() / bits_per_entry; } // Test bit mask size and return either the in-line bit mask or allocated // bit mask. - uintptr_t* bit_mask() { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); } + uintptr_t* bit_mask() const { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); } // return the word size of_bit_mask. mask_size() <= 4 * MAX_USHORT - size_t mask_word_size() { + size_t mask_word_size() const { return (mask_size() + BitsPerWord - 1) / BitsPerWord; } - uintptr_t entry_at(int offset) { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); } + uintptr_t entry_at(int offset) const { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); } - void set_expression_stack_size(int sz) { _expression_stack_size = sz; } + void set_expression_stack_size(int sz) { _expression_stack_size = sz; } #ifdef ENABLE_ZAP_DEAD_LOCALS - bool is_dead(int offset) { return (entry_at(offset) & (1 << dead_bit_number)) != 0; } + bool is_dead(int offset) const { return (entry_at(offset) & (1 << dead_bit_number)) != 0; } #endif // Lookup - bool match(methodHandle method, int bci) { return _method == method() && _bci == bci; } - bool is_empty(); + bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; } + bool is_empty() const; // Initialization void initialize(); @@ -141,12 +140,13 @@ // in-line), allocate the space from a Resource area. void resource_copy(OopMapCacheEntry* from); - void iterate_oop(OffsetClosure* oop_closure); - void print(); + void iterate_oop(OffsetClosure* oop_closure) const; + void print() const; - bool is_oop (int offset) { return (entry_at(offset) & (1 << oop_bit_number )) != 0; } + int number_of_entries() const { return mask_size() / bits_per_entry; } + bool is_oop (int offset) const { return (entry_at(offset) & (1 << oop_bit_number )) != 0; } - int expression_stack_size() { return _expression_stack_size; } + int expression_stack_size() const { return _expression_stack_size; } #ifdef ENABLE_ZAP_DEAD_LOCALS void iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure); @@ -161,10 +161,10 @@ OopMapCacheEntry* _array; - unsigned int hash_value_for(methodHandle method, int bci); + unsigned int hash_value_for(methodHandle method, int bci) const; OopMapCacheEntry* entry_at(int i) const; - Mutex _mut; + mutable Mutex _mut; void flush(); @@ -177,7 +177,7 @@ // Returns the oopMap for (method, bci) in parameter "entry". // Returns false if an oop map was not found. 
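
Making lookup() and friends const while they still take a lock is exactly what forces the `mutable Mutex _mut` seen in this hunk. A self-contained C++ analogue of the pattern, with std::mutex standing in for HotSpot's Mutex:

    #include <map>
    #include <mutex>

    class CacheLike {
     public:
      // Logically const: callers only read, but we still need the lock.
      bool lookup(int key, int* out) const {
        std::lock_guard<std::mutex> guard(_mut);  // legal because _mut is mutable
        std::map<int, int>::const_iterator it = _table.find(key);
        if (it == _table.end()) return false;
        *out = it->second;
        return true;
      }
     private:
      mutable std::mutex _mut;                    // same trick as 'mutable Mutex _mut'
      std::map<int, int> _table;
    };
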
- void lookup(methodHandle method, int bci, InterpreterOopMap* entry); + void lookup(methodHandle method, int bci, InterpreterOopMap* entry) const; // Compute an oop map without updating the cache or grabbing any locks (for debugging) static void compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry); --- ./hotspot/src/share/vm/memory/allocation.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/allocation.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -438,24 +438,22 @@ } //------------------------------Arena------------------------------------------ -NOT_PRODUCT(volatile jint Arena::_instance_count = 0;) - -Arena::Arena(size_t init_size) { +Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) { size_t round_size = (sizeof (char *)) - 1; init_size = (init_size+round_size) & ~round_size; _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); + MemTracker::record_new_arena(flag); set_size_in_bytes(init_size); - NOT_PRODUCT(Atomic::inc(&_instance_count);) } -Arena::Arena() { +Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) { _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); + MemTracker::record_new_arena(flag); set_size_in_bytes(Chunk::init_size); - NOT_PRODUCT(Atomic::inc(&_instance_count);) } Arena *Arena::move_contents(Arena *copy) { @@ -477,7 +475,7 @@ Arena::~Arena() { destruct_contents(); - NOT_PRODUCT(Atomic::dec(&_instance_count);) + MemTracker::record_arena_free(_flags); } void* Arena::operator new(size_t size) throw() { @@ -493,21 +491,21 @@ // dynamic memory type binding void* Arena::operator new(size_t size, MEMFLAGS flags) throw() { #ifdef ASSERT - void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); + void* p = (void*)AllocateHeap(size, flags, CALLER_PC); if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); return p; #else - return (void *) AllocateHeap(size, flags|otArena, CALLER_PC); + return (void *) AllocateHeap(size, flags, CALLER_PC); #endif } void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() { #ifdef ASSERT - void* p = os::malloc(size, flags|otArena, CALLER_PC); + void* p = os::malloc(size, flags, CALLER_PC); if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); return p; #else - return os::malloc(size, flags|otArena, CALLER_PC); + return os::malloc(size, flags, CALLER_PC); #endif } @@ -532,8 +530,9 @@ // change the size void Arena::set_size_in_bytes(size_t size) { if (_size_in_bytes != size) { + long delta = (long)(size - size_in_bytes()); _size_in_bytes = size; - MemTracker::record_arena_size((address)this, size); + MemTracker::record_arena_size_change(delta, _flags); } } --- ./hotspot/src/share/vm/memory/allocation.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/allocation.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -133,51 +133,34 @@ /* - * MemoryType bitmap layout: - * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 | - * | memory type | object | reserved | - * | | type | | + * Memory types */ enum MemoryType { // Memory type by sub systems. It occupies lower byte. - mtNone = 0x0000, // undefined - mtClass = 0x0100, // memory class for Java classes - mtThread = 0x0200, // memory for thread objects - mtThreadStack = 0x0300, - mtCode = 0x0400, // memory for generated code - mtGC = 0x0500, // memory for GC - mtCompiler = 0x0600, // memory for compiler - mtInternal = 0x0700, // memory used by VM, but does not belong to + mtJavaHeap = 0x00, // Java heap + mtClass = 0x01, // memory class for Java classes + mtThread = 0x02, // memory for thread objects + mtThreadStack = 0x03, + mtCode = 0x04, // memory for generated code + mtGC = 0x05, // memory for GC + mtCompiler = 0x06, // memory for compiler + mtInternal = 0x07, // memory used by VM, but does not belong to // any of above categories, and not used for // native memory tracking - mtOther = 0x0800, // memory not used by VM - mtSymbol = 0x0900, // symbol - mtNMT = 0x0A00, // memory used by native memory tracking - mtChunk = 0x0B00, // chunk that holds content of arenas - mtJavaHeap = 0x0C00, // Java heap - mtClassShared = 0x0D00, // class data sharing - mtTest = 0x0E00, // Test type for verifying NMT - mtTracing = 0x0F00, // memory used for Tracing - mt_number_of_types = 0x000F, // number of memory types (mtDontTrack + mtOther = 0x08, // memory not used by VM + mtSymbol = 0x09, // symbol + mtNMT = 0x0A, // memory used by native memory tracking + mtClassShared = 0x0B, // class data sharing + mtChunk = 0x0C, // chunk that holds content of arenas + mtTest = 0x0D, // Test type for verifying NMT + mtTracing = 0x0E, // memory used for Tracing + mtNone = 0x0F, // undefined + mt_number_of_types = 0x10 // number of memory types (mtDontTrack // is not included as validate type) - mtDontTrack = 0x0F00, // memory we do not or cannot track - mt_masks = 0x7F00, - - // object type mask - otArena = 0x0010, // an arena object - otNMTRecorder = 0x0020, // memory recorder object - ot_masks = 0x00F0 }; -#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type) -#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone) -#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks) +typedef MemoryType MEMFLAGS; -#define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena) -#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder) -#define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack))) - -typedef unsigned short MEMFLAGS; #if INCLUDE_NMT @@ -189,27 +172,23 @@ #endif // INCLUDE_NMT -// debug build does not inline -#if defined(_NMT_NOINLINE_) - #define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) - #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) - #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0) -#else - #define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0) - #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) - #define CALLER_CALLER_PC (NMT_track_callsite ? 
os::get_caller_pc(2) : 0)
-#endif
-
+class NativeCallStack;
 
 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new(size_t size) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-    address caller_pc = 0) throw();
+    const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
+    throw();
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-    address caller_pc = 0) throw();
+    const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
+    throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -286,7 +265,8 @@
   f(ConstantPool) \
   f(ConstantPoolCache) \
   f(Annotation) \
-  f(MethodCounters)
+  f(MethodCounters) \
+  f(Deallocated)
 
 #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
 #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
@@ -384,13 +364,15 @@
 
 //------------------------------Arena------------------------------------------
 // Fast allocation of memory
-class Arena : public CHeapObj<mtNone|otArena> {
+class Arena : public CHeapObj<mtNone> {
 protected:
   friend class ResourceMark;
   friend class HandleMark;
   friend class NoHandleMark;
   friend class VMStructs;
 
+  MEMFLAGS    _flags;           // Memory tracking flags
+
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
@@ -418,8 +400,8 @@
   }
 
  public:
-  Arena();
-  Arena(size_t init_size);
+  Arena(MEMFLAGS memflag);
+  Arena(MEMFLAGS memflag, size_t init_size);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
@@ -518,8 +500,6 @@
   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
   static void free_all(char** start, char** end) PRODUCT_RETURN;
 
-  // how many arena instances
-  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
   // Reset this Arena to empty, access will trigger grow if necessary
   void reset(void) {
@@ -681,7 +661,7 @@
   NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
--- ./hotspot/src/share/vm/memory/allocation.inline.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/allocation.inline.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"
 
 // Explicit C-heap memory management
@@ -49,12 +50,10 @@
 #endif
 
 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  if (pc == 0) {
-    pc = CURRENT_PC;
-  }
-  char* p = (char*) os::malloc(size, flags, pc);
+  char* p = (char*) os::malloc(size, flags, stack);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
 #endif
@@ -63,10 +62,14 @@
   }
   return p;
 }
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
+}
 
-inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
+inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
+  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
 #endif
@@ -85,8 +88,22 @@
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+      const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack,
+      AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
 #endif
@@ -94,23 +111,28 @@
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
-    AllocFailStrategy::RETURN_NULL);
-#ifdef ASSERT
-    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
-#endif
-    return p;
+  const std::nothrow_t&  nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc) throw() {
-  return CHeapObj<F>::operator new(size, caller_pc);
+      const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
+  throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+  const std::nothrow_t&  nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
--- ./hotspot/src/share/vm/memory/cardTableModRefBS.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/cardTableModRefBS.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -44,13 +44,6 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
@@ -64,27 +57,50 @@
     int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
-  HeapWord* low_bound  = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
+  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
 
   assert(card_size <= 512, "card_size must be less than 512"); // why?
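
For orientation, the structure being constructed here is a one-byte-per-card map over the heap, and the assert above pins the card size at no more than 512 bytes. The indexing arithmetic in miniature (illustrative names, not HotSpot's; the 512/9 constants mirror the default card size):

    #include <cstddef>
    #include <cstdint>

    static const size_t card_size  = 512;  // bytes of heap covered per card mark byte
    static const int    card_shift = 9;    // log2(card_size)

    // Index of the card byte that covers 'addr' in a heap starting at 'base'.
    static inline size_t card_index(const void* base, const void* addr) {
      return ((uintptr_t)addr - (uintptr_t)base) >> card_shift;
    }
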
- _covered = new MemRegion[max_covered_regions]; - _committed = new MemRegion[max_covered_regions]; - if (_covered == NULL || _committed == NULL) { - vm_exit_during_initialization("couldn't alloc card table covered region set."); + _covered = new MemRegion[_max_covered_regions]; + if (_covered == NULL) { + vm_exit_during_initialization("Could not allocate card table covered region set."); + } +} + +void CardTableModRefBS::initialize() { + _guard_index = cards_required(_whole_heap.word_size()) - 1; + _last_valid_index = _guard_index - 1; + + _byte_map_size = compute_byte_map_size(); + + HeapWord* low_bound = _whole_heap.start(); + HeapWord* high_bound = _whole_heap.end(); + + _cur_covered_regions = 0; + _committed = new MemRegion[_max_covered_regions]; + if (_committed == NULL) { + vm_exit_during_initialization("Could not allocate card table committed region set."); } - _cur_covered_regions = 0; const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : MAX2(_page_size, (size_t) os::vm_allocation_granularity()); ReservedSpace heap_rs(_byte_map_size, rs_align, false); @@ -114,20 +130,20 @@ !ExecMem, "card table last card"); *guard_card = last_card; - _lowest_non_clean = - NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC); + _lowest_non_clean = + NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC); _lowest_non_clean_chunk_size = - NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC); + NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC); _lowest_non_clean_base_chunk_index = - NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC); + NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); _last_LNC_resizing_collection = - NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC); + NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); if (_lowest_non_clean == NULL || _lowest_non_clean_chunk_size == NULL || _lowest_non_clean_base_chunk_index == NULL || _last_LNC_resizing_collection == NULL) vm_exit_during_initialization("couldn't allocate an LNC array."); - for (int i = 0; i < max_covered_regions; i++) { + for (int i = 0; i < _max_covered_regions; i++) { _lowest_non_clean[i] = NULL; _lowest_non_clean_chunk_size[i] = 0; _last_LNC_resizing_collection[i] = -1; @@ -429,7 +445,7 @@ OopsInGenClosure* cl, CardTableRS* ct) { if (!mr.is_empty()) { - // Caller (process_strong_roots()) claims that all GC threads + // Caller (process_roots()) claims that all GC threads // execute this call. With UseDynamicNumberOfGCThreads now all // active GC threads execute this call. The number of active GC // threads needs to be passed to par_non_clean_card_iterate_work() @@ -438,7 +454,7 @@ // This is an example of where n_par_threads() is used instead // of workers()->active_workers(). n_par_threads can be set to 0 to // turn off parallelism. For example when this code is called as - // part of verification and SharedHeap::process_strong_roots() is being + // part of verification and SharedHeap::process_roots() is being // used, then n_par_threads() may have been set to 0. active_workers // is not overloaded with the meaning that it is a switch to disable // parallelism and so keeps the meaning of the number of @@ -650,7 +666,7 @@ jbyte val, bool val_equals) { jbyte* start = byte_for(mr.start()); jbyte* end = byte_for(mr.last()); - bool failures = false; + bool failures = false; for (jbyte* curr = start; curr <= end; ++curr) { jbyte curr_val = *curr; bool failed = (val_equals) ? 
(curr_val != val) : (curr_val == val); --- ./hotspot/src/share/vm/memory/cardTableModRefBS.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/cardTableModRefBS.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -96,12 +96,12 @@ // The declaration order of these const fields is important; see the // constructor before changing. const MemRegion _whole_heap; // the region covered by the card table - const size_t _guard_index; // index of very last element in the card + size_t _guard_index; // index of very last element in the card // table; it is set to a guard value // (last_card) and should never be modified - const size_t _last_valid_index; // index of the last valid element + size_t _last_valid_index; // index of the last valid element const size_t _page_size; // page size used when mapping _byte_map - const size_t _byte_map_size; // in bytes + size_t _byte_map_size; // in bytes jbyte* _byte_map; // the card marking array int _cur_covered_regions; @@ -123,7 +123,12 @@ protected: // Initialization utilities; covered_words is the size of the covered region // in, um, words. - inline size_t cards_required(size_t covered_words); + inline size_t cards_required(size_t covered_words) { + // Add one for a guard card, used to detect errors. + const size_t words = align_size_up(covered_words, card_size_in_words); + return words / card_size_in_words + 1; + } + inline size_t compute_byte_map_size(); // Finds and return the index of the region, if any, to which the given @@ -137,7 +142,7 @@ int find_covering_region_containing(HeapWord* addr); // Resize one of the regions covered by the remembered set. - void resize_covered_region(MemRegion new_region); + virtual void resize_covered_region(MemRegion new_region); // Returns the leftmost end of a committed region corresponding to a // covered region before covered region "ind", or else "NULL" if "ind" is @@ -282,6 +287,8 @@ CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); ~CardTableModRefBS(); + virtual void initialize(); + // *** Barrier set functions. bool has_write_ref_pre_barrier() { return false; } --- ./hotspot/src/share/vm/memory/cardTableRS.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/cardTableRS.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -53,9 +53,10 @@ #else _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); #endif + _ct_bs->initialize(); set_bs(_ct_bs); _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1, - mtGC, 0, AllocFailStrategy::RETURN_NULL); + mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); if (_last_cur_val_in_gen == NULL) { vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); } --- ./hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -183,13 +183,9 @@ // Requirements of any new remembered set implementations must be added here. size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); - // Parallel GC does its own alignment of the generations to avoid requiring a - // large page (256M on some platforms) for the permanent generation. The - // other collectors should also be updated to do their own alignment and then - // this use of lcm() should be removed. 
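
The lcm() retained in the replacement lines just below is doing real work: the chosen alignment must satisfy the card-table constraint and the large-page size simultaneously, and the least common multiple is the smallest value that satisfies both. Generic helpers showing why (a sketch, not HotSpot's implementation):

    #include <cstddef>

    static size_t gcd(size_t a, size_t b) {
      while (b != 0) { size_t t = a % b; a = b; b = t; }
      return a;
    }

    // Smallest alignment satisfying both constraints, e.g.
    // lcm(64K card-based alignment, 2M large pages) == 2M.
    static size_t lcm(size_t a, size_t b) {
      return (a / gcd(a, b)) * b;
    }
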
- if (UseLargePages && !UseParallelGC) { - // in presence of large pages we have to make sure that our - // alignment is large page aware + if (UseLargePages) { + // In presence of large pages we have to make sure that our + // alignment is large page aware. alignment = lcm(os::large_page_size(), alignment); } @@ -969,7 +965,8 @@ } void MarkSweepPolicy::initialize_generations() { - _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); + _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC, + AllocFailStrategy::RETURN_NULL); if (_generations == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); } --- ./hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/defNewGeneration.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -42,6 +42,7 @@ #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" +#include "runtime/prefetch.inline.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" #include "utilities/stack.inline.hpp" @@ -584,7 +585,7 @@ init_assuming_no_promotion_failure(); - GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); @@ -612,6 +613,9 @@ KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier, gch->rem_set()->klass_rem_set()); + CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure, + &fsc_with_no_gc_barrier, + false); set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, @@ -621,18 +625,15 @@ assert(gch->no_allocs_since_save_marks(0), "save marks have not been newly set."); - int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; - - gch->gen_process_strong_roots(_level, - true, // Process younger gens, if any, - // as strong roots. - true, // activate StrongRootsScope - true, // is scavenging - SharedHeap::ScanningOption(so), - &fsc_with_no_gc_barrier, - true, // walk *all* scavengable nmethods - &fsc_with_gc_barrier, - &klass_scan_closure); + gch->gen_process_roots(_level, + true, // Process younger gens, if any, + // as strong roots. + true, // activate StrongRootsScope + SharedHeap::SO_ScavengeCodeCache, + GenCollectedHeap::StrongAndWeakRoots, + &fsc_with_no_gc_barrier, + &fsc_with_gc_barrier, + &cld_scan_closure); // "evacuate followers". 
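
A little further on, DefNewGeneration's copy routine switches its to-space allocation to allocate_aligned(), plugging survivor copies into the new alignment machinery. The copy policy around that one-line change, reduced to a self-contained sketch (all types and helpers below are hypothetical stand-ins for the real DefNewGeneration members):

    #include <cstddef>

    // Hypothetical stand-ins so the sketch compiles on its own:
    struct oopDesc { int age() const; };
    typedef oopDesc* oop;
    typedef void HeapWord;

    static int       tenuring_threshold();
    static HeapWord* survivor_allocate_aligned(size_t word_sz);
    static oop       promote_to_old_gen(oop old, size_t word_sz);

    oop copy_or_tenure(oop old, size_t word_sz) {
      oop obj = NULL;
      if (old->age() < tenuring_threshold()) {
        obj = (oop) survivor_allocate_aligned(word_sz);  // to()->allocate_aligned(s) in the patch
      }
      if (obj == NULL) {
        obj = promote_to_old_gen(old, word_sz);          // too old, or to-space is full
      }
      return obj;
    }
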
evacuate_followers.do_void(); @@ -642,7 +643,7 @@ rp->setup_policy(clear_all_soft_refs); const ReferenceProcessorStats& stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, - NULL, _gc_timer); + NULL, _gc_timer, gc_tracer.gc_id()); gc_tracer.report_gc_reference_stats(stats); if (!_promotion_failed) { @@ -788,7 +789,7 @@ // Try allocating obj in to-space (unless too old) if (old->age() < tenuring_threshold()) { - obj = (oop) to()->allocate(s); + obj = (oop) to()->allocate_aligned(s); } // Otherwise try allocating obj tenured --- ./hotspot/src/share/vm/memory/filemap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/filemap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,9 +24,14 @@ #include "precompiled.hpp" #include "classfile/classLoader.hpp" +#include "classfile/sharedClassUtil.hpp" #include "classfile/symbolTable.hpp" +#include "classfile/systemDictionaryShared.hpp" #include "classfile/altHashing.hpp" #include "memory/filemap.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/oopFactory.hpp" +#include "oops/objArrayOop.hpp" #include "runtime/arguments.hpp" #include "runtime/java.hpp" #include "runtime/os.hpp" @@ -41,7 +46,6 @@ #endif PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC - extern address JVM_FunctionAtStart(); extern address JVM_FunctionAtEnd(); @@ -77,12 +81,27 @@ void FileMapInfo::fail_continue(const char *msg, ...) { va_list ap; va_start(ap, msg); - if (RequireSharedSpaces) { - fail(msg, ap); + MetaspaceShared::set_archive_loading_failed(); + if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) { + // If we are doing PrintSharedArchiveAndExit and some of the classpath entries + // do not validate, we can still continue "limping" to validate the remaining + // entries. No need to quit. + tty->print("["); + tty->vprint(msg, ap); + tty->print_cr("]"); + } else { + if (RequireSharedSpaces) { + fail(msg, ap); + } else { + if (PrintSharedSpaces) { + tty->print_cr("UseSharedSpaces: %s", msg); + } + } + UseSharedSpaces = false; + assert(current_info() != NULL, "singleton must be registered"); + current_info()->close(); } va_end(ap); - UseSharedSpaces = false; - close(); } // Fill in the fileMapInfo structure with data about this VM instance. 
@@ -117,74 +136,208 @@ } } +FileMapInfo::FileMapInfo() { + assert(_current_info == NULL, "must be singleton"); // not thread safe + _current_info = this; + memset(this, 0, sizeof(FileMapInfo)); + _file_offset = 0; + _file_open = false; + _header = SharedClassUtil::allocate_file_map_header(); + _header->_version = _invalid_version; +} + +FileMapInfo::~FileMapInfo() { + assert(_current_info == this, "must be singleton"); // not thread safe + _current_info = NULL; +} + void FileMapInfo::populate_header(size_t alignment) { - _header._magic = 0xf00baba2; - _header._version = _current_version; - _header._alignment = alignment; - _header._obj_alignment = ObjectAlignmentInBytes; + _header->populate(this, alignment); +} + +size_t FileMapInfo::FileMapHeader::data_size() { + return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase); +} + +void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) { + _magic = 0xf00baba2; + _version = _current_version; + _alignment = alignment; + _obj_alignment = ObjectAlignmentInBytes; + _classpath_entry_table_size = mapinfo->_classpath_entry_table_size; + _classpath_entry_table = mapinfo->_classpath_entry_table; + _classpath_entry_size = mapinfo->_classpath_entry_size; // The following fields are for sanity checks for whether this archive // will function correctly with this JVM and the bootclasspath it's // invoked with. // JVM version string ... changes on each build. - get_header_version(_header._jvm_ident); + get_header_version(_jvm_ident); +} - // Build checks on classpath and jar files - _header._num_jars = 0; - ClassPathEntry *cpe = ClassLoader::classpath_entry(0); - for ( ; cpe != NULL; cpe = cpe->next()) { +void FileMapInfo::allocate_classpath_entry_table() { + int bytes = 0; + int count = 0; + char* strptr = NULL; + char* strptr_max = NULL; + Thread* THREAD = Thread::current(); - if (cpe->is_jar_file()) { - if (_header._num_jars >= JVM_SHARED_JARS_MAX) { - fail_stop("Too many jar files to share.", NULL); + ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); + size_t entry_size = SharedClassUtil::shared_class_path_entry_size(); + + for (int pass=0; pass<2; pass++) { + ClassPathEntry *cpe = ClassLoader::classpath_entry(0); + + for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) { + const char *name = cpe->name(); + int name_bytes = (int)(strlen(name) + 1); + + if (pass == 0) { + count ++; + bytes += (int)entry_size; + bytes += name_bytes; + if (TraceClassPaths || (TraceClassLoading && Verbose)) { + tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name); + } + } else { + SharedClassPathEntry* ent = shared_classpath(cur_entry); + if (cpe->is_jar_file()) { + struct stat st; + if (os::stat(name, &st) != 0) { + // The file/dir must exist, or it would not have been added + // into ClassLoader::classpath_entry(). + // + // If we can't access a jar file in the boot path, then we can't + // make assumptions about where classes get loaded from. + FileMapInfo::fail_stop("Unable to open jar file %s.", name); + } + + EXCEPTION_MARK; // The following call should never throw, but would exit VM on error. 
+ SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
+ } else {
+ ent->_filesize = -1;
+ if (!os::dir_is_empty(name)) {
+ ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+ }
+ }
+ ent->_name = strptr;
+ if (strptr + name_bytes <= strptr_max) {
+ strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
+ strptr += name_bytes;
+ } else {
+ assert(0, "miscalculated buffer size");
+ }
+ }
+ }
- // Jar file - record timestamp and file size.
- struct stat st;
- const char *path = cpe->name();
- if (os::stat(path, &st) != 0) {
- // If we can't access a jar file in the boot path, then we can't
- // make assumptions about where classes get loaded from.
- fail_stop("Unable to open jar file %s.", path);
- }
- _header._jar[_header._num_jars]._timestamp = st.st_mtime;
- _header._jar[_header._num_jars]._filesize = st.st_size;
- _header._num_jars++;
- } else {
+ if (pass == 0) {
+ EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+ Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
+ strptr = (char*)(arr->data());
+ strptr_max = strptr + bytes;
+ SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
+ strptr += entry_size * count;
- // If directories appear in boot classpath, they must be empty to
- // avoid having to verify each individual class file.
- const char* name = ((ClassPathDirEntry*)cpe)->name();
- if (!os::dir_is_empty(name)) {
- fail_stop("Boot classpath directory %s is not empty.", name);
- }
+ _classpath_entry_table_size = count;
+ _classpath_entry_table = table;
+ _classpath_entry_size = entry_size;
+ }
}
}
+bool FileMapInfo::validate_classpath_entry_table() {
+ _validating_classpath_entry_table = true;
+
+ int count = _header->_classpath_entry_table_size;
+
+ _classpath_entry_table = _header->_classpath_entry_table;
+ _classpath_entry_size = _header->_classpath_entry_size;
+
+ for (int i=0; i<count; i++) {
+ SharedClassPathEntry* ent = shared_classpath(i);
+ struct stat st;
+ const char* name = ent->_name;
+ bool ok = true;
+ if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+ tty->print_cr("[Checking shared classpath entry: %s]", name);
+ }
+ if (os::stat(name, &st) != 0) {
+ fail_continue("Required classpath entry does not exist: %s", name);
+ ok = false;
+ } else if (ent->is_dir()) {
+ if (!os::dir_is_empty(name)) {
+ fail_continue("directory is not empty: %s", name);
+ ok = false;
+ }
+ } else {
+ if (ent->_timestamp != st.st_mtime ||
+ ent->_filesize != st.st_size) {
+ ok = false;
+ if (PrintSharedArchiveAndExit) {
+ fail_continue(ent->_timestamp != st.st_mtime ?
+ "Timestamp mismatch" :
+ "File size mismatch");
+ } else {
+ fail_continue("A jar file is not the one used while building"
+ " the shared archive file: %s", name);
+ }
+ }
+ }
+ if (ok) {
+ if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+ tty->print_cr("[ok]");
+ }
+ } else if (!PrintSharedArchiveAndExit) {
+ _validating_classpath_entry_table = false;
+ return false;
+ }
+ }
+
+ _classpath_entry_table_size = _header->_classpath_entry_table_size;
+ _validating_classpath_entry_table = false;
+ return true;
+}
+
// Read the FileMapInfo information from the file.
bool FileMapInfo::init_from_file(int fd) { - - size_t n = read(fd, &_header, sizeof(struct FileMapHeader)); - if (n != sizeof(struct FileMapHeader)) { + size_t sz = _header->data_size(); + char* addr = _header->data(); + size_t n = os::read(fd, addr, (unsigned int)sz); + if (n != sz) { fail_continue("Unable to read the file header."); return false; } - if (_header._version != current_version()) { + if (_header->_version != current_version()) { fail_continue("The shared archive file has the wrong version."); return false; } + + size_t info_size = _header->_paths_misc_info_size; + _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass); + if (_paths_misc_info == NULL) { + fail_continue("Unable to read the file header."); + return false; + } + n = os::read(fd, _paths_misc_info, (unsigned int)info_size); + if (n != info_size) { + fail_continue("Unable to read the shared path info header."); + FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass); + _paths_misc_info = NULL; + return false; + } + size_t len = lseek(fd, 0, SEEK_END); struct FileMapInfo::FileMapHeader::space_info* si = - &_header._space[MetaspaceShared::mc]; + &_header->_space[MetaspaceShared::mc]; if (si->_file_offset >= len || len - si->_file_offset < si->_used) { fail_continue("The shared archive file has been truncated."); return false; } - _file_offset = n; + + _file_offset += (long)n; return true; } @@ -239,7 +392,16 @@ // Write the header to the file, seek to the next allocation boundary. void FileMapInfo::write_header() { - write_bytes_aligned(&_header, sizeof(FileMapHeader)); + int info_size = ClassLoader::get_shared_paths_misc_info_size(); + + _header->_paths_misc_info_size = info_size; + + align_file_position(); + size_t sz = _header->data_size(); + char* addr = _header->data(); + write_bytes(addr, (int)sz); // skip the C++ vtable + write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size); + align_file_position(); } @@ -249,7 +411,7 @@ align_file_position(); size_t used = space->used_bytes_slow(Metaspace::NonClassType); size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType); - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; write_region(i, (char*)space->bottom(), used, capacity, read_only, false); } @@ -259,7 +421,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size, size_t capacity, bool read_only, bool allow_exec) { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[region]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region]; if (_file_open) { guarantee(si->_file_offset == _file_offset, "file offset mismatch."); @@ -343,7 +505,7 @@ // JVM/TI RedefineClasses() support: // Remap the shared readonly space to shared readwrite, private. bool FileMapInfo::remap_shared_readonly_as_readwrite() { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0]; if (!si->_read_only) { // the space is already readwrite so we are done return true; @@ -371,7 +533,7 @@ // Map the whole region at once, assumed to be allocated contiguously. 
ReservedSpace FileMapInfo::reserve_shared_memory() { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0]; char* requested_addr = si->_base; size_t size = FileMapInfo::shared_spaces_size(); @@ -393,7 +555,7 @@ static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"}; char* FileMapInfo::map_region(int i) { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; size_t used = si->_used; size_t alignment = os::vm_allocation_granularity(); size_t size = align_size_up(used, alignment); @@ -419,10 +581,10 @@ if (!VerifySharedSpaces) { return true; } - const char* buf = _header._space[i]._base; - size_t sz = _header._space[i]._used; + const char* buf = _header->_space[i]._base; + size_t sz = _header->_space[i]._used; int crc = ClassLoader::crc32(0, buf, (jint)sz); - if (crc != _header._space[i]._crc) { + if (crc != _header->_space[i]._crc) { fail_continue("Checksum verification failed."); return false; } @@ -432,7 +594,7 @@ // Unmap a memory region in the address space. void FileMapInfo::unmap_region(int i) { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; size_t used = si->_used; size_t size = align_size_up(used, os::vm_allocation_granularity()); if (!os::unmap_memory(si->_base, size)) { @@ -449,12 +611,21 @@ FileMapInfo* FileMapInfo::_current_info = NULL; - +SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL; +int FileMapInfo::_classpath_entry_table_size = 0; +size_t FileMapInfo::_classpath_entry_size = 0x1234baad; +bool FileMapInfo::_validating_classpath_entry_table = false; // Open the shared archive file, read and validate the header // information (version, boot classpath, etc.). If initialization // fails, shared spaces are disabled and the file is closed. [See // fail_continue.] +// +// Validation of the archive is done in two steps: +// +// [1] validate_header() - done here. This checks the header, including _paths_misc_info. +// [2] validate_classpath_entry_table - this is done later, because the table is in the RW +// region of the archive, which is not mapped yet. 
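(Aside: the two-step validation that the comment above describes is easiest to see as a call-ordering sketch. The following is a minimal, self-contained C++ analogue, not HotSpot code: "Archive", "header_ok", "table_ok", and "use_archive" are hypothetical stand-ins for FileMapInfo::validate_header() and FileMapInfo::validate_classpath_entry_table(); the point is only that step [1] runs when the archive file is opened, while step [2] must wait until the RW region has been mapped.)

#include <cstdio>

// Hypothetical stand-in for the archive state; not a HotSpot type.
struct Archive {
  bool header_ok;  // step [1]: header + _paths_misc_info, checked at open time
  bool table_ok;   // step [2]: classpath entry table, stored in the RW region
};

static bool use_archive(const Archive& a) {
  if (!a.header_ok) {
    return false;            // [1] fail fast; nothing has been mapped yet
  }
  // ... the RO/RW/MD/MC regions would be mapped here ...
  return a.table_ok;         // [2] only checkable once the RW region is mapped
}

int main() {
  Archive a = { true, true };
  std::printf("archive usable: %s\n", use_archive(a) ? "yes" : "no");
  return 0;
}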
bool FileMapInfo::initialize() { assert(UseSharedSpaces, "UseSharedSpaces expected."); @@ -468,104 +639,84 @@ } init_from_file(_fd); - if (!validate()) { + if (!validate_header()) { return false; } - SharedReadOnlySize = _header._space[0]._capacity; - SharedReadWriteSize = _header._space[1]._capacity; - SharedMiscDataSize = _header._space[2]._capacity; - SharedMiscCodeSize = _header._space[3]._capacity; + SharedReadOnlySize = _header->_space[0]._capacity; + SharedReadWriteSize = _header->_space[1]._capacity; + SharedMiscDataSize = _header->_space[2]._capacity; + SharedMiscCodeSize = _header->_space[3]._capacity; return true; } -int FileMapInfo::compute_header_crc() { - char* header = (char*)&_header; +int FileMapInfo::FileMapHeader::compute_crc() { + char* header = data(); // start computing from the field after _crc - char* buf = (char*)&_header._crc + sizeof(int); - size_t sz = sizeof(FileMapInfo::FileMapHeader) - (buf - header); + char* buf = (char*)&_crc + sizeof(int); + size_t sz = data_size() - (buf - header); int crc = ClassLoader::crc32(0, buf, (jint)sz); return crc; } -bool FileMapInfo::validate() { - if (VerifySharedSpaces && compute_header_crc() != _header._crc) { +int FileMapInfo::compute_header_crc() { + return _header->compute_crc(); +} + +bool FileMapInfo::FileMapHeader::validate() { + if (_magic != (int)0xf00baba2) { + FileMapInfo::fail_continue("The shared archive file has a bad magic number."); + return false; + } + if (VerifySharedSpaces && compute_crc() != _crc) { fail_continue("Header checksum verification failed."); return false; } - if (_header._version != current_version()) { - fail_continue("The shared archive file is the wrong version."); - return false; - } - if (_header._magic != (int)0xf00baba2) { - fail_continue("The shared archive file has a bad magic number."); + if (_version != current_version()) { + FileMapInfo::fail_continue("The shared archive file is the wrong version."); + return false; } char header_version[JVM_IDENT_MAX]; get_header_version(header_version); - if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) { - fail_continue("The shared archive file was created by a different" - " version or build of HotSpot."); + if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) { + if (TraceClassPaths) { + tty->print_cr("Expected: %s", header_version); + tty->print_cr("Actual: %s", _jvm_ident); + } + FileMapInfo::fail_continue("The shared archive file was created by a different" + " version or build of HotSpot"); return false; } - if (_header._obj_alignment != ObjectAlignmentInBytes) { - fail_continue("The shared archive file's ObjectAlignmentInBytes of %d" + if (_obj_alignment != ObjectAlignmentInBytes) { + FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d" " does not equal the current ObjectAlignmentInBytes of %d.", - _header._obj_alignment, ObjectAlignmentInBytes); - return false; - } - - // Cannot verify interpreter yet, as it can only be created after the GC - // heap has been initialized. - - if (_header._num_jars >= JVM_SHARED_JARS_MAX) { - fail_continue("Too many jar files to share."); - return false; - } - - // Build checks on classpath and jar files - int num_jars_now = 0; - ClassPathEntry *cpe = ClassLoader::classpath_entry(0); - for ( ; cpe != NULL; cpe = cpe->next()) { - - if (cpe->is_jar_file()) { - if (num_jars_now < _header._num_jars) { - - // Jar file - verify timestamp and file size. 
- struct stat st; - const char *path = cpe->name(); - if (os::stat(path, &st) != 0) { - fail_continue("Unable to open jar file %s.", path); - return false; - } - if (_header._jar[num_jars_now]._timestamp != st.st_mtime || - _header._jar[num_jars_now]._filesize != st.st_size) { - fail_continue("A jar file is not the one used while building" - " the shared archive file."); - return false; - } - } - ++num_jars_now; - } else { - - // If directories appear in boot classpath, they must be empty to - // avoid having to verify each individual class file. - const char* name = ((ClassPathDirEntry*)cpe)->name(); - if (!os::dir_is_empty(name)) { - fail_continue("Boot classpath directory %s is not empty.", name); - return false; - } - } - } - if (num_jars_now < _header._num_jars) { - fail_continue("The number of jar files in the boot classpath is" - " less than the number the shared archive was created with."); + _obj_alignment, ObjectAlignmentInBytes); return false; } return true; } +bool FileMapInfo::validate_header() { + bool status = _header->validate(); + + if (status) { + if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) { + if (!PrintSharedArchiveAndExit) { + fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)"); + status = false; + } + } + } + + if (_paths_misc_info != NULL) { + FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass); + _paths_misc_info = NULL; + } + return status; +} + // The following method is provided to see whether a given pointer // falls in the mapped shared space. // Param: @@ -574,8 +725,8 @@ // True if the p is within the mapped shared space, otherwise, false. bool FileMapInfo::is_in_shared_space(const void* p) { for (int i = 0; i < MetaspaceShared::n_regions; i++) { - if (p >= _header._space[i]._base && - p < _header._space[i]._base + _header._space[i]._used) { + if (p >= _header->_space[i]._base && + p < _header->_space[i]._base + _header->_space[i]._used) { return true; } } @@ -586,7 +737,7 @@ void FileMapInfo::print_shared_spaces() { gclog_or_tty->print_cr("Shared Spaces:"); for (int i = 0; i < MetaspaceShared::n_regions; i++) { - struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; + struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT, shared_region_name[i], si->_base, si->_base + si->_used); @@ -599,9 +750,9 @@ if (map_info) { map_info->fail_continue(msg); for (int i = 0; i < MetaspaceShared::n_regions; i++) { - if (map_info->_header._space[i]._base != NULL) { + if (map_info->_header->_space[i]._base != NULL) { map_info->unmap_region(i); - map_info->_header._space[i]._base = NULL; + map_info->_header->_space[i]._base = NULL; } } } else if (DumpSharedSpaces) { --- ./hotspot/src/share/vm/memory/filemap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/filemap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -37,30 +37,55 @@ // misc data (block offset table, string table, symbols, dictionary, etc.) 
// tag(666)
-static const int JVM_SHARED_JARS_MAX = 128;
-static const int JVM_SPACENAME_MAX = 128;
static const int JVM_IDENT_MAX = 256;
-static const int JVM_ARCH_MAX = 12;
-
class Metaspace;
+class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
+public:
+ const char *_name;
+ time_t _timestamp; // jar timestamp, 0 if is directory
+ long _filesize; // jar file size, -1 if is directory
+ bool is_dir() {
+ return _filesize == -1;
+ }
+};
+
class FileMapInfo : public CHeapObj<mtInternal> {
private:
+ friend class ManifestStream;
enum {
_invalid_version = -1,
- _current_version = 1
+ _current_version = 2
};
bool _file_open;
int _fd;
size_t _file_offset;
+private:
+ static SharedClassPathEntry* _classpath_entry_table;
+ static int _classpath_entry_table_size;
+ static size_t _classpath_entry_size;
+ static bool _validating_classpath_entry_table;
+
// FileMapHeader describes the shared space data in the file to be
// mapped. This structure gets written to a file. It is not a class, so
// that the compilers don't add any compiler-private data to it.
- struct FileMapHeader {
+public:
+ struct FileMapHeaderBase : public CHeapObj<mtClass> {
+ virtual bool validate() = 0;
+ virtual void populate(FileMapInfo* info, size_t alignment) = 0;
+ };
+ struct FileMapHeader : FileMapHeaderBase {
+ // Use data() and data_size() to memcopy to/from the FileMapHeader. We need to
+ // avoid read/writing the C++ vtable pointer.
+ static size_t data_size();
+ char* data() {
+ return ((char*)this) + sizeof(FileMapHeaderBase);
+ }
+
int _magic; // identify file type.
int _crc; // header crc checksum.
int _version; // (from enum, above.)
@@ -80,46 +105,67 @@
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
- char _arch[JVM_ARCH_MAX]; // architecture
char _jvm_ident[JVM_IDENT_MAX]; // identifier for jvm
- int _num_jars; // Number of jars in bootclasspath
- // Per jar file data: timestamp, size.
+ // The _paths_misc_info is a variable-size structure that records "miscellaneous"
+ // information during dumping. It is generated and validated by the
+ // SharedPathsMiscInfo class. See SharedPathsMiscInfo.hpp and sharedClassUtil.hpp for
+ // detailed description.
+ //
+ // The _paths_misc_info data is stored as a byte array in the archive file header,
+ // immediately after the _header field. This information is used only when
+ // checking the validity of the archive and is deallocated after the archive is loaded.
+ //
+ // Note that the _paths_misc_info does NOT include information for JAR files
+ // that existed during dump time. Their information is stored in _classpath_entry_table.
+ int _paths_misc_info_size;
- struct {
- time_t _timestamp; // jar timestamp.
- long _filesize; // jar file size.
- } _jar[JVM_SHARED_JARS_MAX];
- } _header;
+ // The following is a table of all the class path entries that were used
+ // during dumping. At run time, we require these files to exist and have the same
+ // size/modification time, or else the archive will refuse to load.
+ //
+ // All of these entries must be JAR files. The dumping process would fail if a non-empty
+ // directory was specified in the classpaths. If an empty directory was specified
+ // it is checked by the _paths_misc_info as described above.
+ //
+ // FIXME -- if JAR files in the tail of the list were specified but not used during dumping,
+ // they should be removed from this table, to save space and to avoid spurious
+ // loading failures during runtime.
+ int _classpath_entry_table_size; + size_t _classpath_entry_size; + SharedClassPathEntry* _classpath_entry_table; + + virtual bool validate(); + virtual void populate(FileMapInfo* info, size_t alignment); + int compute_crc(); + }; + + FileMapHeader * _header; + const char* _full_path; + char* _paths_misc_info; static FileMapInfo* _current_info; bool init_from_file(int fd); void align_file_position(); + bool validate_header_impl(); public: - FileMapInfo() { - _file_offset = 0; - _file_open = false; - _header._version = _invalid_version; - } + FileMapInfo(); + ~FileMapInfo(); static int current_version() { return _current_version; } int compute_header_crc(); - void set_header_crc(int crc) { _header._crc = crc; } + void set_header_crc(int crc) { _header->_crc = crc; } void populate_header(size_t alignment); - bool validate(); + bool validate_header(); void invalidate(); - int version() { return _header._version; } - size_t alignment() { return _header._alignment; } - size_t space_capacity(int i) { return _header._space[i]._capacity; } - char* region_base(int i) { return _header._space[i]._base; } - struct FileMapHeader* header() { return &_header; } - - static void set_current_info(FileMapInfo* info) { - CDS_ONLY(_current_info = info;) - } + int version() { return _header->_version; } + size_t alignment() { return _header->_alignment; } + size_t space_capacity(int i) { return _header->_space[i]._capacity; } + char* region_base(int i) { return _header->_space[i]._base; } + struct FileMapHeader* header() { return _header; } static FileMapInfo* current_info() { CDS_ONLY(return _current_info;) @@ -151,7 +197,7 @@ // Errors. static void fail_stop(const char *msg, ...); - void fail_continue(const char *msg, ...); + static void fail_continue(const char *msg, ...); // Return true if given address is in the mapped shared space. bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false); @@ -165,6 +211,22 @@ // Stop CDS sharing and unmap CDS regions. static void stop_sharing_and_unmap(const char* msg); + + static void allocate_classpath_entry_table(); + bool validate_classpath_entry_table(); + + static SharedClassPathEntry* shared_classpath(int index) { + char* p = (char*)_classpath_entry_table; + p += _classpath_entry_size * index; + return (SharedClassPathEntry*)p; + } + static const char* shared_classpath_name(int index) { + return shared_classpath(index)->_name; + } + + static int get_number_of_share_classpaths() { + return _classpath_entry_table_size; + } }; #endif // SHARE_VM_MEMORY_FILEMAP_HPP --- ./hotspot/src/share/vm/memory/freeList.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/freeList.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -34,7 +34,6 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" -#include "gc_implementation/g1/g1CodeCacheRemSet.hpp" #endif // INCLUDE_ALL_GCS // Free list. 
A FreeList is used to access a linked list of chunks
@@ -333,5 +332,4 @@
template class FreeList<Metablock>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
-template class FreeList<G1CodeRootChunk>;
#endif // INCLUDE_ALL_GCS
--- ./hotspot/src/share/vm/memory/gcLocker.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/gcLocker.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -26,6 +26,7 @@
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
+#include "runtime/thread.inline.hpp"
volatile jint GC_locker::_jni_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
--- ./hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/genCollectedHeap.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
@@ -60,8 +61,8 @@
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
-// The set of potentially parallel tasks in strong root scanning.
-enum GCH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum GCH_strong_roots_tasks {
// We probably want to parallelize both of these internally, but for now...
GCH_PS_younger_gens,
// Leave this one last.
@@ -71,11 +72,11 @@
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
_gen_policy(policy),
- _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
+ _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0) {
- if (_gen_process_strong_tasks == NULL ||
- !_gen_process_strong_tasks->valid()) {
+ if (_gen_process_roots_tasks == NULL ||
+ !_gen_process_roots_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation.");
}
assert(policy != NULL, "Sanity check");
@@ -385,7 +386,9 @@
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
+ // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+ // so we can assume here that the next GC id is what we want.
+ GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
gc_prologue(complete);
increment_total_collections(complete);
@@ -418,7 +421,9 @@
}
// Timer for individual generations. Last argument is false: no CR
// FIXME: We should try to start the timing earlier to cover more of the GC pause
- GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
+ // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+ // so we can assume here that the next GC id is what we want.
+ GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek()); TraceCollectorStats tcs(_gens[i]->counters()); TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); @@ -585,33 +590,29 @@ void GenCollectedHeap::set_par_threads(uint t) { SharedHeap::set_par_threads(t); - _gen_process_strong_tasks->set_n_threads(t); + _gen_process_roots_tasks->set_n_threads(t); } void GenCollectedHeap:: -gen_process_strong_roots(int level, - bool younger_gens_as_roots, - bool activate_scope, - bool is_scavenging, - SharedHeap::ScanningOption so, - OopsInGenClosure* not_older_gens, - bool do_code_roots, - OopsInGenClosure* older_gens, - KlassClosure* klass_closure) { - // General strong roots. +gen_process_roots(int level, + bool younger_gens_as_roots, + bool activate_scope, + SharedHeap::ScanningOption so, + OopsInGenClosure* not_older_gens, + OopsInGenClosure* weak_roots, + OopsInGenClosure* older_gens, + CLDClosure* cld_closure, + CLDClosure* weak_cld_closure, + CodeBlobClosure* code_closure) { - if (!do_code_roots) { - SharedHeap::process_strong_roots(activate_scope, is_scavenging, so, - not_older_gens, NULL, klass_closure); - } else { - bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active()); - CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking); - SharedHeap::process_strong_roots(activate_scope, is_scavenging, so, - not_older_gens, &code_roots, klass_closure); - } + // General roots. + SharedHeap::process_roots(activate_scope, so, + not_older_gens, weak_roots, + cld_closure, weak_cld_closure, + code_closure); if (younger_gens_as_roots) { - if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { + if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) { for (int i = 0; i < level; i++) { not_older_gens->set_generation(_gens[i]); _gens[i]->oop_iterate(not_older_gens); @@ -627,12 +628,42 @@ older_gens->reset_generation(); } - _gen_process_strong_tasks->all_tasks_completed(); + _gen_process_roots_tasks->all_tasks_completed(); } -void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots) { - SharedHeap::process_weak_roots(root_closure, code_roots); +void GenCollectedHeap:: +gen_process_roots(int level, + bool younger_gens_as_roots, + bool activate_scope, + SharedHeap::ScanningOption so, + bool only_strong_roots, + OopsInGenClosure* not_older_gens, + OopsInGenClosure* older_gens, + CLDClosure* cld_closure) { + + const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots; + + bool is_moving_collection = false; + if (level == 0 || is_adjust_phase) { + // young collections are always moving + is_moving_collection = true; + } + + MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection); + CodeBlobClosure* code_closure = &mark_code_closure; + + gen_process_roots(level, + younger_gens_as_roots, + activate_scope, so, + not_older_gens, only_strong_roots ? NULL : not_older_gens, + older_gens, + cld_closure, only_strong_roots ? 
NULL : cld_closure, + code_closure); + +} + +void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { + SharedHeap::process_weak_roots(root_closure); // "Local" "weak" refs for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); @@ -673,10 +704,6 @@ return _gens[0]->end_addr(); } -size_t GenCollectedHeap::unsafe_max_alloc() { - return _gens[0]->unsafe_max_alloc_nogc(); -} - // public collection interfaces void GenCollectedHeap::collect(GCCause::Cause cause) { @@ -687,15 +714,18 @@ #else // INCLUDE_ALL_GCS ShouldNotReachHere(); #endif // INCLUDE_ALL_GCS + } else if (cause == GCCause::_wb_young_gc) { + // minor collection for WhiteBox API + collect(cause, 0); } else { #ifdef ASSERT - if (cause == GCCause::_scavenge_alot) { - // minor collection only - collect(cause, 0); - } else { - // Stop-the-world full collection - collect(cause, n_gens() - 1); - } + if (cause == GCCause::_scavenge_alot) { + // minor collection only + collect(cause, 0); + } else { + // Stop-the-world full collection + collect(cause, n_gens() - 1); + } #else // Stop-the-world full collection collect(cause, n_gens() - 1); @@ -851,12 +881,6 @@ } } -void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->oop_iterate(mr, cl); - } -} - void GenCollectedHeap::object_iterate(ObjectClosure* cl) { for (int i = 0; i < _n_gens; i++) { _gens[i]->object_iterate(cl); @@ -1074,7 +1098,7 @@ guarantee(_n_gens = 2, "Wrong number of generations"); Generation* old_gen = _gens[1]; // Start by compacting into same gen. - CompactPoint cp(old_gen, NULL, NULL); + CompactPoint cp(old_gen); old_gen->prepare_for_compaction(&cp); Generation* young_gen = _gens[0]; young_gen->prepare_for_compaction(&cp); --- ./hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/genCollectedHeap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -78,9 +78,9 @@ unsigned int _full_collections_completed; // Data structure for claiming the (potentially) parallel tasks in - // (gen-specific) strong roots processing. - SubTasksDone* _gen_process_strong_tasks; - SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; } + // (gen-specific) roots processing. + SubTasksDone* _gen_process_roots_tasks; + SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; } // In block contents verification, the number of header words to skip NOT_PRODUCT(static size_t _skip_header_HeapWords;) @@ -166,14 +166,6 @@ HeapWord** top_addr() const; HeapWord** end_addr() const; - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection activity. In a generational - // collector, for example, this is probably the largest allocation that - // could be supported in the youngest generation. It is "unsafe" because - // no locks are taken; the result should be treated as an approximation, - // not a guarantee. - size_t unsafe_max_alloc(); - // Does this heap support heap inspection? (+PrintClassHistogram) virtual bool supports_heap_inspection() const { return true; } @@ -220,7 +212,6 @@ // Iteration functions. 
void oop_iterate(ExtendedOopClosure* cl); - void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); void object_iterate(ObjectClosure* cl); void safe_object_iterate(ObjectClosure* cl); Space* space_containing(const void* addr) const; @@ -412,26 +403,35 @@ // The "so" argument determines which of the roots // the closure is applied to: // "SO_None" does none; - // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; - // "SO_SystemClasses" to all the "system" classes and loaders; - // "SO_Strings" applies the closure to all entries in the StringTable. - void gen_process_strong_roots(int level, - bool younger_gens_as_roots, - // The remaining arguments are in an order - // consistent with SharedHeap::process_strong_roots: - bool activate_scope, - bool is_scavenging, - SharedHeap::ScanningOption so, - OopsInGenClosure* not_older_gens, - bool do_code_roots, - OopsInGenClosure* older_gens, - KlassClosure* klass_closure); + private: + void gen_process_roots(int level, + bool younger_gens_as_roots, + bool activate_scope, + SharedHeap::ScanningOption so, + OopsInGenClosure* not_older_gens, + OopsInGenClosure* weak_roots, + OopsInGenClosure* older_gens, + CLDClosure* cld_closure, + CLDClosure* weak_cld_closure, + CodeBlobClosure* code_closure); - // Apply "blk" to all the weak roots of the system. These include - // JNI weak roots, the code cache, system dictionary, symbol table, - // string table, and referents of reachable weak refs. - void gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots); + public: + static const bool StrongAndWeakRoots = false; + static const bool StrongRootsOnly = true; + + void gen_process_roots(int level, + bool younger_gens_as_roots, + bool activate_scope, + SharedHeap::ScanningOption so, + bool only_strong_roots, + OopsInGenClosure* not_older_gens, + OopsInGenClosure* older_gens, + CLDClosure* cld_closure); + + // Apply "root_closure" to all the weak roots of the system. + // These include JNI weak roots, string table, + // and referents of reachable weak refs. + void gen_process_weak_roots(OopClosure* root_closure); // Set the saved marks of generations, if that makes sense. // In particular, if any generation might iterate over the oops --- ./hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/genMarkSweep.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,7 +69,7 @@ _ref_processor = rp; rp->setup_policy(clear_all_softrefs); - GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id()); gch->trace_heap_before_gc(_gc_tracer); @@ -193,7 +193,7 @@ void GenMarkSweep::mark_sweep_phase1(int level, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace(" 1"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -207,22 +207,21 @@ // Need new claim bits before marking starts. ClassLoaderDataGraph::clear_claimed_marks(); - gch->gen_process_strong_roots(level, - false, // Younger gens are not roots. 
- true, // activate StrongRootsScope - false, // not scavenging - SharedHeap::SO_SystemClasses, - &follow_root_closure, - true, // walk code active on stacks - &follow_root_closure, - &follow_klass_closure); + gch->gen_process_roots(level, + false, // Younger gens are not roots. + true, // activate StrongRootsScope + SharedHeap::SO_None, + GenCollectedHeap::StrongRootsOnly, + &follow_root_closure, + &follow_root_closure, + &follow_cld_closure); // Process reference objects found during marking { ref_processor()->setup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( - &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer); + &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id()); gc_tracer()->report_gc_reference_stats(stats); } @@ -264,7 +263,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("2"); gch->prepare_for_compaction(); @@ -281,7 +280,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("3"); // Need new claim bits for the pointer adjustment tracing. @@ -293,22 +292,16 @@ // are run. adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); - gch->gen_process_strong_roots(level, - false, // Younger gens are not roots. - true, // activate StrongRootsScope - false, // not scavenging - SharedHeap::SO_AllClasses, - &adjust_pointer_closure, - false, // do not walk code - &adjust_pointer_closure, - &adjust_klass_closure); + gch->gen_process_roots(level, + false, // Younger gens are not roots. + true, // activate StrongRootsScope + SharedHeap::SO_AllCodeCache, + GenCollectedHeap::StrongAndWeakRoots, + &adjust_pointer_closure, + &adjust_pointer_closure, + &adjust_cld_closure); - // Now adjust pointers in remaining weak roots. (All of which should - // have been cleared if they pointed to non-surviving objects.) - CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, - /*do_marking=*/ false); - gch->gen_process_weak_roots(&adjust_pointer_closure, - &adjust_code_pointer_closure); + gch->gen_process_weak_roots(&adjust_pointer_closure); adjust_marks(); GenAdjustPointersClosure blk; @@ -336,7 +329,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. 
GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("4"); GenCompactClosure blk; --- ./hotspot/src/share/vm/memory/generation.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/generation.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -297,22 +297,16 @@ class GenerationOopIterateClosure : public SpaceClosure { public: - ExtendedOopClosure* cl; - MemRegion mr; + ExtendedOopClosure* _cl; virtual void do_space(Space* s) { - s->oop_iterate(mr, cl); + s->oop_iterate(_cl); } - GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) : - cl(_cl), mr(_mr) {} + GenerationOopIterateClosure(ExtendedOopClosure* cl) : + _cl(cl) {} }; void Generation::oop_iterate(ExtendedOopClosure* cl) { - GenerationOopIterateClosure blk(cl, _reserved); - space_iterate(&blk); -} - -void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { - GenerationOopIterateClosure blk(cl, mr); + GenerationOopIterateClosure blk(cl); space_iterate(&blk); } --- ./hotspot/src/share/vm/memory/generation.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/generation.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -543,10 +543,6 @@ // generation, calling "cl.do_oop" on each. virtual void oop_iterate(ExtendedOopClosure* cl); - // Same as above, restricted to the intersection of a memory region and - // the generation. - virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); - // Iterate over all objects in the generation, calling "cl.do_object" on // each. virtual void object_iterate(ObjectClosure* cl); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/memory/guardedMemory.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" +#include "memory/allocation.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/guardedMemory.hpp" +#include "runtime/os.hpp" + +void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) { + size_t total_sz = GuardedMemory::get_total_size(len); + void* outerp = os::malloc(total_sz, mtInternal); + if (outerp != NULL) { + GuardedMemory guarded(outerp, len, tag); + void* innerp = guarded.get_user_ptr(); + memcpy(innerp, ptr, len); + return innerp; + } + return NULL; // OOM +} + +bool GuardedMemory::free_copy(void* p) { + if (p == NULL) { + return true; + } + GuardedMemory guarded((u_char*)p); + bool verify_ok = guarded.verify_guards(); + + /* always attempt to free, pass problem on to any nested memchecker */ + os::free(guarded.release_for_freeing()); + + return verify_ok; +} + +void GuardedMemory::print_on(outputStream* st) const { + if (_base_addr == NULL) { + st->print_cr("GuardedMemory(" PTR_FORMAT ") not associated to any memory", p2i(this)); + return; + } + st->print_cr("GuardedMemory(" PTR_FORMAT ") base_addr=" PTR_FORMAT + " tag=" PTR_FORMAT " user_size=" SIZE_FORMAT " user_data=" PTR_FORMAT, + p2i(this), p2i(_base_addr), p2i(get_tag()), get_user_size(), p2i(get_user_ptr())); + + Guard* guard = get_head_guard(); + st->print_cr(" Header guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN")); + guard = get_tail_guard(); + st->print_cr(" Trailer guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN")); + + u_char udata = *get_user_ptr(); + switch (udata) { + case uninitBlockPad: + st->print_cr(" User data appears unused"); + break; + case freeBlockPad: + st->print_cr(" User data appears to have been freed"); + break; + default: + st->print_cr(" User data appears to be in use"); + break; + } +} + +// test code... + +#ifndef PRODUCT + +static void guarded_memory_test_check(void* p, size_t sz, void* tag) { + assert(p != NULL, "NULL pointer given to check"); + u_char* c = (u_char*) p; + GuardedMemory guarded(c); + assert(guarded.get_tag() == tag, "Tag is not the same as supplied"); + assert(guarded.get_user_ptr() == c, "User pointer is not the same as supplied"); + assert(guarded.get_user_size() == sz, "User size is not the same as supplied"); + assert(guarded.verify_guards(), "Guard broken"); +} + +void GuardedMemory::test_guarded_memory() { + // Test the basic characteristics... + size_t total_sz = GuardedMemory::get_total_size(1); + assert(total_sz > 1 && total_sz >= (sizeof(GuardHeader) + 1 + sizeof(Guard)), "Unexpected size"); + u_char* basep = (u_char*) os::malloc(total_sz, mtInternal); + + GuardedMemory guarded(basep, 1, (void*)0xf000f000); + + assert(*basep == badResourceValue, "Expected guard in the form of badResourceValue"); + u_char* userp = guarded.get_user_ptr(); + assert(*userp == uninitBlockPad, "Expected uninitialized data in the form of uninitBlockPad"); + guarded_memory_test_check(userp, 1, (void*)0xf000f000); + + void* freep = guarded.release_for_freeing(); + assert((u_char*)freep == basep, "Expected the same pointer guard was "); + assert(*userp == freeBlockPad, "Expected user data to be free block padded"); + assert(!guarded.verify_guards(), "Expected failed"); + os::free(freep); + + // Test a number of odd sizes... 
+ size_t sz = 0; + do { + void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal); + void* up = guarded.wrap_with_guards(p, sz, (void*)1); + memset(up, 0, sz); + guarded_memory_test_check(up, sz, (void*)1); + os::free(guarded.release_for_freeing()); + sz = (sz << 4) + 1; + } while (sz < (256 * 1024)); + + // Test buffer overrun into head... + basep = (u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal); + guarded.wrap_with_guards(basep, 1); + *basep = 0; + assert(!guarded.verify_guards(), "Expected failure"); + os::free(basep); + + // Test buffer overrun into tail with a number of odd sizes... + sz = 1; + do { + void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal); + void* up = guarded.wrap_with_guards(p, sz, (void*)1); + memset(up, 0, sz + 1); // Buffer-overwrite (within guard) + assert(!guarded.verify_guards(), "Guard was not broken as expected"); + os::free(guarded.release_for_freeing()); + sz = (sz << 4) + 1; + } while (sz < (256 * 1024)); + + // Test wrap_copy/wrap_free... + assert(GuardedMemory::free_copy(NULL), "Expected free NULL to be OK"); + + const char* str = "Check my bounds out"; + size_t str_sz = strlen(str) + 1; + char* str_copy = (char*) GuardedMemory::wrap_copy(str, str_sz); + guarded_memory_test_check(str_copy, str_sz, NULL); + assert(strcmp(str, str_copy) == 0, "Not identical copy"); + assert(GuardedMemory::free_copy(str_copy), "Free copy failed to verify"); + + void* no_data = NULL; + void* no_data_copy = GuardedMemory::wrap_copy(no_data, 0); + assert(GuardedMemory::free_copy(no_data_copy), "Expected valid guards even for no data copy"); +} + +#endif // !PRODUCT + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/memory/guardedMemory.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP +#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +/** + * Guarded memory for detecting buffer overrun. + * + * Allows allocations to be wrapped with padded bytes of a known byte pattern, + * that is a "guard". Guard patterns may be verified to detect buffer overruns. + * + * Primarily used by "debug malloc" and "checked JNI". 
+ *
+ * Memory layout:
+ *
+ * |Offset             | Content              | Description    |
+ * |------------------------------------------------------------
+ * |base_addr          | 0xABABABABABABABAB   | Head guard     |
+ * |+16                | <size_t:user_size>   | User data size |
+ * |+sizeof(uintptr_t) | <tag>                | Tag word       |
+ * |+sizeof(void*)     | 0xF1 <user_data> 0xF1| User data      |
+ * |+user_size         | 0xABABABABABABABAB   | Tail guard     |
+ * -------------------------------------------------------------
+ *
+ * Where:
+ * - guard padding uses "badResourceValue" (0xAB)
+ * - tag word is general purpose
+ * - user data
+ * -- initially padded with "uninitBlockPad" (0xF1),
+ * -- to "freeBlockPad" (0xBA), when freed
+ *
+ * Usage:
+ *
+ * * Allocations: one may wrap allocations with guard memory:
+ *
+ * Thing* alloc_thing() {
+ * void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(thing)));
+ * GuardedMemory guarded(mem, sizeof(thing));
+ * return (Thing*) guarded.get_user_ptr();
+ * }
+ *
+ * * Verify: memory guards are still intact
+ *
+ * bool verify_thing(Thing* thing) {
+ * GuardedMemory guarded((void*)thing);
+ * return guarded.verify_guards();
+ * }
+ *
+ * * Free: one may mark bytes as freed (further debugging support)
+ *
+ * void free_thing(Thing* thing) {
+ * GuardedMemory guarded((void*)thing);
+ * assert(guarded.verify_guards(), "Corrupt thing");
+ * user_free_fn(guarded.release_for_freeing());
+ * }
+ *
+ */
+class GuardedMemory : StackObj { // Wrapper on stack
+
+ // Private inner classes for memory layout...
+
+protected:
+
+ /**
+ * Guard class for header and trailer known pattern to test for overwrites.
+ */
+ class Guard { // Class for raw memory (no vtbl allowed)
+ friend class GuardedMemory;
+ protected:
+ enum {
+ GUARD_SIZE = 16
+ };
+
+ u_char _guard[GUARD_SIZE];
+
+ public:
+
+ void build() {
+ u_char* c = _guard; // Possibly unaligned if tail guard
+ u_char* end = c + GUARD_SIZE;
+ while (c < end) {
+ *c = badResourceValue;
+ c++;
+ }
+ }
+
+ bool verify() const {
+ u_char* c = (u_char*) _guard;
+ u_char* end = c + GUARD_SIZE;
+ while (c < end) {
+ if (*c != badResourceValue) {
+ return false;
+ }
+ c++;
+ }
+ return true;
+ }
+
+ }; // GuardedMemory::Guard
+
+ /**
+ * Header guard and size
+ */
+ class GuardHeader : Guard {
+ friend class GuardedMemory;
+ protected:
+ // Take care in modifying fields here, will affect alignment
+ // e.g. x86 ABI 16 byte stack alignment
+ union {
+ uintptr_t __unused_full_word1;
+ size_t _user_size;
+ };
+ void* _tag;
+ public:
+ void set_user_size(const size_t usz) { _user_size = usz; }
+ size_t get_user_size() const { return _user_size; }
+
+ void set_tag(const void* tag) { _tag = (void*) tag; }
+ void* get_tag() const { return _tag; }
+
+ }; // GuardedMemory::GuardHeader
+
+ // Guarded Memory...
+
+ protected:
+ u_char* _base_addr;
+
+ public:
+
+ /**
+ * Create new guarded memory.
+ *
+ * Wraps, starting at the given "base_ptr" with guards. Use "get_user_ptr()"
+ * to return a pointer suitable for user data.
+ *
+ * @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
+ * @param user_size the size of the user data to be wrapped.
+ * @param tag optional general purpose tag.
+ */
+ GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
+ wrap_with_guards(base_ptr, user_size, tag);
+ }
+
+ /**
+ * Wrap existing guarded memory.
+ *
+ * To use this constructor, one must have created guarded memory with
+ * "GuardedMemory(void*, size_t, void*)" (or indirectly via helper, e.g. "wrap_copy()").
+ *
+ * @param user_p existing wrapped memory.
+ */ + GuardedMemory(void* userp) { + u_char* user_ptr = (u_char*) userp; + assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer"); + _base_addr = (user_ptr - sizeof(GuardHeader)); + } + + /** + * Create new guarded memory. + * + * Wraps, starting at the given "base_ptr" with guards. Allows reuse of stack allocated helper. + * + * @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes. + * @param user_size the size of the user data to be wrapped. + * @param tag optional general purpose tag. + * + * @return user data pointer (inner pointer to supplied "base_ptr"). + */ + void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) { + assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard"); + _base_addr = (u_char*)base_ptr; + get_head_guard()->build(); + get_head_guard()->set_user_size(user_size); + get_tail_guard()->build(); + set_tag(tag); + set_user_bytes(uninitBlockPad); + assert(verify_guards(), "Expected valid memory guards"); + return get_user_ptr(); + } + + /** + * Verify head and tail guards. + * + * @return true if guards are intact, false would indicate a buffer overrun. + */ + bool verify_guards() const { + if (_base_addr != NULL) { + return (get_head_guard()->verify() && get_tail_guard()->verify()); + } + return false; + } + + /** + * Set the general purpose tag. + * + * @param tag general purpose tag. + */ + void set_tag(const void* tag) { get_head_guard()->set_tag(tag); } + + /** + * Return the general purpose tag. + * + * @return the general purpose tag, defaults to NULL. + */ + void* get_tag() const { return get_head_guard()->get_tag(); } + + /** + * Return the size of the user data. + * + * @return the size of the user data. + */ + size_t get_user_size() const { + assert(_base_addr, "Not wrapping any memory"); + return get_head_guard()->get_user_size(); + } + + /** + * Return the user data pointer. + * + * @return the user data pointer. + */ + u_char* get_user_ptr() const { + assert(_base_addr, "Not wrapping any memory"); + return _base_addr + sizeof(GuardHeader); + } + + /** + * Release the wrapped pointer for resource freeing. + * + * Pads the user data with "freeBlockPad", and dis-associates the helper. + * + * @return the original base pointer used to wrap the data. + */ + void* release_for_freeing() { + set_user_bytes(freeBlockPad); + return release(); + } + + /** + * Dis-associate the help from the original base address. + * + * @return the original base pointer used to wrap the data. + */ + void* release() { + void* p = (void*) _base_addr; + _base_addr = NULL; + return p; + } + + virtual void print_on(outputStream* st) const; + + protected: + GuardHeader* get_head_guard() const { return (GuardHeader*) _base_addr; } + Guard* get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); }; + void set_user_bytes(u_char ch) { + memset(get_user_ptr(), ch, get_user_size()); + } + +public: + /** + * Return the total size required for wrapping the given user size. + * + * @return the total size required for wrapping the given user size. + */ + static size_t get_total_size(size_t user_size) { + size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard); + assert(total_size > user_size, "Unexpected wrap-around"); + return total_size; + } + + // Helper functions... + + /** + * Wrap a copy of size "len" of "ptr". 
+ * + * @param ptr the memory to be copied + * @param len the length of the copy + * @param tag optional general purpose tag (see GuardedMemory::get_tag()) + * + * @return guarded wrapped memory pointer to the user area, or NULL if OOM. + */ + static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL); + + /** + * Free wrapped copy. + * + * Frees memory copied with "wrap_copy()". + * + * @param p memory returned by "wrap_copy()". + * + * @return true if guards were verified as intact. false indicates a buffer overrun. + */ + static bool free_copy(void* p); + + // Testing... +#ifndef PRODUCT + static void test_guarded_memory(void); +#endif +}; // GuardedMemory + +#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP --- ./hotspot/src/share/vm/memory/heapInspection.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/heapInspection.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -135,7 +135,7 @@ _ref = (HeapWord*) Universe::boolArrayKlassObj(); _buckets = (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets, - mtInternal, 0, AllocFailStrategy::RETURN_NULL); + mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL); if (_buckets != NULL) { _size = _num_buckets; for (int index = 0; index < _size; index++) { --- ./hotspot/src/share/vm/memory/iterator.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/iterator.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,6 +27,7 @@ #include "oops/oop.inline.hpp" void KlassToOopClosure::do_klass(Klass* k) { + assert(_oop_closure != NULL, "Not initialized?"); k->oops_do(_oop_closure); } @@ -34,6 +35,10 @@ cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld); } +void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) { + cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld); +} + void ObjectToOopClosure::do_object(oop obj) { obj->oop_iterate(_cl); } @@ -42,6 +47,20 @@ ShouldNotCallThis(); } +void CodeBlobToOopClosure::do_nmethod(nmethod* nm) { + nm->oops_do(_cl); + if (_fix_relocations) { + nm->fix_oop_relocations(); + } +} + +void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) { + nmethod* nm = cb->as_nmethod_or_null(); + if (nm != NULL) { + do_nmethod(nm); + } +} + MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate) : _active(activate) { @@ -54,32 +73,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) { nmethod* nm = cb->as_nmethod_or_null(); - if (nm == NULL) return; - if (!nm->test_set_oops_do_mark()) { - NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n")); - do_newly_marked_nmethod(nm); - } else { - NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, skipped on 2nd visit\n")); + if (nm != NULL && !nm->test_set_oops_do_mark()) { + do_nmethod(nm); } } - -void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) { - nm->oops_do(_cl, /*allow_zombie=*/ false); -} - -void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) { - if (!_do_marking) { - nmethod* nm = cb->as_nmethod_or_null(); - NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL) nm->print_on(tty, "oops_do, unmarked visit\n")); - // This assert won't work, since there are lots of mini-passes - // (mostly in debug mode) that co-exist with marking phases. 
-    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
-    if (nm != NULL) {
-      nm->oops_do(_cl);
-    }
-  } else {
-    MarkingCodeBlobClosure::do_code_blob(cb);
-  }
-}
-
-
--- ./hotspot/src/share/vm/memory/iterator.hpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/iterator.hpp	Wed Feb 04 12:14:39 2015 -0800
@@ -84,8 +84,8 @@
   //
   // Providing default implementations of the _nv functions unfortunately
   // removes the compile-time safeness, but reduces the clutter for the
-  // ExtendedOopClosures that don't need to walk the metadata. Currently,
-  // only CMS needs these.
+  // ExtendedOopClosures that don't need to walk the metadata.
+  // Currently, only CMS and G1 need these.
 
   virtual bool do_metadata() { return do_metadata_nv(); }
   bool do_metadata_v()       { return do_metadata(); }
@@ -128,17 +128,33 @@
   virtual void do_klass(Klass* k) = 0;
 };
 
+class CLDClosure : public Closure {
+ public:
+  virtual void do_cld(ClassLoaderData* cld) = 0;
+};
+
 class KlassToOopClosure : public KlassClosure {
+  friend class MetadataAwareOopClosure;
+  friend class MetadataAwareOopsInGenClosure;
+
   OopClosure* _oop_closure;
+
+  // Used when _oop_closure couldn't be set in an initialization list.
+  void initialize(OopClosure* oop_closure) {
+    assert(_oop_closure == NULL, "Should only be called once");
+    _oop_closure = oop_closure;
+  }
+
  public:
-  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
+  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
+
   virtual void do_klass(Klass* k);
 };
 
-class CLDToOopClosure {
-  OopClosure* _oop_closure;
+class CLDToOopClosure : public CLDClosure {
+  OopClosure*       _oop_closure;
   KlassToOopClosure _klass_closure;
-  bool _must_claim_cld;
+  bool              _must_claim_cld;
 
  public:
   CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
@@ -149,6 +165,46 @@
   void do_cld(ClassLoaderData* cld);
 };
 
+class CLDToKlassAndOopClosure : public CLDClosure {
+  friend class SharedHeap;
+  friend class G1CollectedHeap;
+ protected:
+  OopClosure*   _oop_closure;
+  KlassClosure* _klass_closure;
+  bool          _must_claim_cld;
+ public:
+  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
+                          OopClosure* oop_closure,
+                          bool must_claim_cld) :
+                              _oop_closure(oop_closure),
+                              _klass_closure(klass_closure),
+                              _must_claim_cld(must_claim_cld) {}
+  void do_cld(ClassLoaderData* cld);
+};
+
+// The base class for all concurrent marking closures
+// that participate in class unloading.
+// It's used to proxy through the metadata to the oops defined in them.
+class MetadataAwareOopClosure: public ExtendedOopClosure {
+  KlassToOopClosure _klass_closure;
+
+ public:
+  MetadataAwareOopClosure() : ExtendedOopClosure() {
+    _klass_closure.initialize(this);
+  }
+  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
+    _klass_closure.initialize(this);
+  }
+
+  virtual bool do_metadata()    { return do_metadata_nv(); }
+  inline  bool do_metadata_nv() { return true; }
+
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
+
+  virtual void do_class_loader_data(ClassLoaderData* cld);
+};
+
 // ObjectClosure is used for iterating through an object space
 
 class ObjectClosure : public Closure {
@@ -172,19 +228,6 @@
   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 };
 
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous()              { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
 // A version of ObjectClosure that is expected to be robust
 // in the face of possibly uninitialized objects.
 class ObjectClosureCareful : public ObjectClosure {
@@ -240,14 +283,26 @@
   virtual void do_code_blob(CodeBlob* cb) = 0;
 };
 
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure : public CodeBlobClosure {
+  OopClosure* _cl;
+  bool _fix_relocations;
+ protected:
+  void do_nmethod(nmethod* nm);
+ public:
+  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
+  virtual void do_code_blob(CodeBlob* cb);
 
-class MarkingCodeBlobClosure : public CodeBlobClosure {
+  const static bool FixRelocations = true;
+};
+
+class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
  public:
+  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
   // Called for each code blob, but at most once per unique blob.
-  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
 
   virtual void do_code_blob(CodeBlob* cb);
-    // = { if (!nmethod(cb)->test_set_oops_do_mark()) do_newly_marked_nmethod(cb); }
 
   class MarkScope : public StackObj {
   protected:
@@ -260,23 +315,6 @@
   };
 };
 
-
-// Applies an oop closure to all ref fields in code blobs
-// iterated over in an object iteration.
-class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
-  OopClosure* _cl;
-  bool _do_marking;
-public:
-  virtual void do_newly_marked_nmethod(nmethod* cb);
-    // = { cb->oops_do(_cl); }
-  virtual void do_code_blob(CodeBlob* cb);
-    // = { if (_do_marking) super::do_code_blob(cb); else cb->oops_do(_cl); }
-  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
-    : _cl(cl), _do_marking(do_marking) {}
-};
-
-
-
 // MonitorClosure is used for iterating over monitors in the monitors cache
 
 class ObjectMonitor;
@@ -345,4 +383,16 @@
   }
 };
 
+
+// Helper defines for ExtendedOopClosure
+
+#define if_do_metadata_checked(closure, nv_suffix)                    \
+  /* Make sure the non-virtual and the virtual versions match.
*/ \ + assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ + "Inconsistency in do_metadata"); \ + if (closure->do_metadata##nv_suffix()) + +#define assert_should_ignore_metadata(closure, nv_suffix) \ + assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented") + #endif // SHARE_VM_MEMORY_ITERATOR_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/memory/iterator.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_ITERATOR_INLINE_HPP +#define SHARE_VM_MEMORY_ITERATOR_INLINE_HPP + +#include "classfile/classLoaderData.hpp" +#include "memory/iterator.hpp" +#include "oops/klass.hpp" +#include "utilities/debug.hpp" + +inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) { + assert(_klass_closure._oop_closure == this, "Must be"); + + bool claim = true; // Must claim the class loader data before processing. + cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); +} + +inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) { + ClassLoaderData* cld = k->class_loader_data(); + do_class_loader_data(cld); +} + +inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); } + +#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP --- ./hotspot/src/share/vm/memory/memRegion.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/memRegion.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -103,11 +103,13 @@ } void* MemRegion::operator new(size_t size) throw() { - return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); + return (address)AllocateHeap(size, mtGC, CURRENT_PC, + AllocFailStrategy::RETURN_NULL); } void* MemRegion::operator new [](size_t size) throw() { - return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); + return (address)AllocateHeap(size, mtGC, CURRENT_PC, + AllocFailStrategy::RETURN_NULL); } void MemRegion::operator delete(void* p) { FreeHeap(p, mtGC); --- ./hotspot/src/share/vm/memory/metadataFactory.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/metadataFactory.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP #define SHARE_VM_MEMORY_METADATAFACTORY_HPP +#include "classfile/classLoaderData.hpp" #include "utilities/array.hpp" #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" @@ -63,6 +64,12 @@ template static void free_array(ClassLoaderData* loader_data, Array* data) { + if (DumpSharedSpaces) { + // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled. + // Disable for now -- this means if you specify bad classes in your classlist you + // may have wasted space inside the archive. + return; + } if (data != NULL) { assert(loader_data != NULL, "shouldn't pass null"); assert(!data->is_shared(), "cannot deallocate array in shared spaces"); @@ -78,6 +85,12 @@ // Deallocation method for metadata template static void free_metadata(ClassLoaderData* loader_data, T md) { + if (DumpSharedSpaces) { + // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled. + // Disable for now -- this means if you specify bad classes in your classlist you + // may have wasted space inside the archive. + return; + } if (md != NULL) { assert(loader_data != NULL, "shouldn't pass null"); int size = md->size(); --- ./hotspot/src/share/vm/memory/metaspace.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/metaspace.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -42,7 +42,7 @@ #include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/mutex.hpp" -#include "runtime/orderAccess.hpp" +#include "runtime/orderAccess.inline.hpp" #include "services/memTracker.hpp" #include "services/memoryService.hpp" #include "utilities/copy.hpp" @@ -413,6 +413,7 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); +#if INCLUDE_CDS // This allocates memory with mmap. For DumpSharedspaces, try to reserve // configurable address, generally at the top of the Java heap so other // memory addresses don't conflict. 
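
The switch from a literal 0 to CURRENT_PC above only changes the call site recorded by native memory tracking; the AllocFailStrategy::RETURN_NULL argument is what shapes the callers. A standalone sketch of that caller-side contract, with malloc standing in for AllocateHeap (the names here are hypothetical):

#include <cstdlib>
#include <cstddef>

// Stand-in for AllocateHeap(..., AllocFailStrategy::RETURN_NULL):
// exhaustion is reported to the caller instead of aborting the VM.
static void* allocate_or_null(size_t bytes) {
  return std::malloc(bytes);  // may return NULL
}

// Callers must check and degrade gracefully, as KlassInfoTable and the
// MemRegion operator new overloads above do.
static bool grow_buffer(void** slot, size_t bytes) {
  void* p = allocate_or_null(bytes);
  if (p == NULL) {
    return false;             // e.g. heap inspection reports an OOM condition
  }
  *slot = p;
  return true;
}
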
@@ -428,7 +429,9 @@
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
-  } else {
+  } else
+#endif
+  {
     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 
     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@@ -1411,10 +1414,31 @@
   return value;
 }
 
-size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
   assert_is_size_aligned(v, Metaspace::commit_alignment());
 
-  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+  size_t capacity_until_GC = (size_t) _capacity_until_GC;
+  size_t new_value = capacity_until_GC + v;
+
+  if (new_value < capacity_until_GC) {
+    // The addition wrapped around, set new_value to aligned max value.
+    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
+  }
+
+  intptr_t expected = (intptr_t) capacity_until_GC;
+  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
+
+  if (expected != actual) {
+    return false;
+  }
+
+  if (new_cap_until_GC != NULL) {
+    *new_cap_until_GC = new_value;
+  }
+  if (old_cap_until_GC != NULL) {
+    *old_cap_until_GC = capacity_until_GC;
+  }
+
+  return true;
 }
 
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
@@ -1514,7 +1538,10 @@
   expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
   // Don't expand unless it's significant
   if (expand_bytes >= MinMetaspaceExpansion) {
-    size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
+    size_t new_capacity_until_GC = 0;
+    bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
+    assert(succeeded, "Should always successfully increment HWM when at safepoint");
+
     Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                              new_capacity_until_GC,
                                              MetaspaceGCThresholdUpdater::ComputeNewSize);
@@ -2937,11 +2964,14 @@
   // between the lower base and higher address.
   address lower_base;
   address higher_address;
+#if INCLUDE_CDS
   if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
-  } else {
+  } else
+#endif
+  {
     higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
@@ -2962,6 +2992,7 @@
   }
 }
 
+#if INCLUDE_CDS
 // Return TRUE if the specified metaspace_base and cds_base are close enough
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
@@ -2972,6 +3003,7 @@
                                   (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
+#endif
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
@@ -2991,6 +3023,7 @@
                                              large_pages, requested_addr, 0);
 
   if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);
 
@@ -3005,7 +3038,7 @@
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
-
+#endif
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
@@ -3024,12 +3057,13 @@
 
   // If we got here then the metaspace got allocated.
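
inc_capacity_until_GC now makes one compare-and-swap attempt instead of an unconditional Atomic::add_ptr, so callers see a consistent old/new pair and the high-water mark saturates rather than wrapping. A minimal sketch of the same pattern in portable C++, with std::atomic standing in for Atomic::cmpxchg_ptr (types simplified):

#include <atomic>
#include <cstdint>

static std::atomic<uintptr_t> capacity_until_gc(0);

// One CAS attempt: returns false if another thread raced us, so the
// caller decides whether to retry.
bool inc_capacity(uintptr_t v, uintptr_t* new_cap, uintptr_t* old_cap) {
  uintptr_t expected = capacity_until_gc.load();
  uintptr_t next = expected + v;
  if (next < expected) {
    next = UINTPTR_MAX;  // saturate instead of wrapping around
  }
  if (!capacity_until_gc.compare_exchange_strong(expected, next)) {
    return false;
  }
  if (new_cap != 0) *new_cap = next;
  if (old_cap != 0) *old_cap = expected;
  return true;
}

At a safepoint only one thread updates the mark, so a single attempt must succeed (hence the assert above); concurrent callers retry instead, as in expand_and_allocate later in this patch.
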
MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); +#if INCLUDE_CDS // Verify that we can use shared spaces. Otherwise, turn off CDS. if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { FileMapInfo::stop_sharing_and_unmap( "Could not allocate metaspace at a compatible address"); } - +#endif set_narrow_klass_base_and_shift((address)metaspace_rs.base(), UseSharedSpaces ? (address)cds_base : 0); @@ -3107,17 +3141,30 @@ MetaspaceGC::initialize(); // Initialize the alignment for shared spaces. - int max_alignment = os::vm_page_size(); + int max_alignment = os::vm_allocation_granularity(); size_t cds_total = 0; MetaspaceShared::set_max_alignment(max_alignment); if (DumpSharedSpaces) { +#if INCLUDE_CDS + MetaspaceShared::estimate_regions_size(); + SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); + // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods() + uintx min_misc_code_size = align_size_up( + (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) * + (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size, + max_alignment); + + if (SharedMiscCodeSize < min_misc_code_size) { + report_out_of_shared_space(SharedMiscCode); + } + // Initialize with the sum of the shared space sizes. The read-only // and read write metaspace chunks will be allocated out of this and the // remainder is the misc code and data chunks. @@ -3150,23 +3197,22 @@ } Universe::set_narrow_klass_shift(0); -#endif - +#endif // _LP64 +#endif // INCLUDE_CDS } else { +#if INCLUDE_CDS // If using shared space, open the file that contains the shared space // and map in the memory before initializing the rest of metaspace (so // the addresses don't conflict) address cds_address = NULL; if (UseSharedSpaces) { FileMapInfo* mapinfo = new FileMapInfo(); - memset(mapinfo, 0, sizeof(FileMapInfo)); // Open the shared archive file, read and validate the header. If // initialization fails, shared spaces [UseSharedSpaces] are // disabled and the file is closed. // Map in spaces now also if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { - FileMapInfo::set_current_info(mapinfo); cds_total = FileMapInfo::shared_spaces_size(); cds_address = (address)mapinfo->region_base(0); } else { @@ -3174,21 +3220,23 @@ "archive file not closed or shared spaces not disabled."); } } - +#endif // INCLUDE_CDS #ifdef _LP64 // If UseCompressedClassPointers is set then allocate the metaspace area // above the heap and above the CDS area (if it exists). 
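
The min_misc_code_size bound above can be sanity-checked against the constants this patch adds to MetaspaceShared (num_virtuals = 200, vtbl_list_size = 17, vtbl_method_size = 16, vtbl_common_code_size = 1K); for 8-byte pointers:

// 200 virtuals * 17 vtables * (8-byte slot + 16-byte stub) + 1K common code
//   = 200 * 17 * 24 + 1024 = 81600 + 1024 = 82624 bytes,
// rounded up to max_alignment before comparing against SharedMiscCodeSize.
const size_t min_misc_code = 200 * 17 * (sizeof(void*) + 16) + 1024;
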
if (using_class_space()) { if (UseSharedSpaces) { +#if INCLUDE_CDS char* cds_end = (char*)(cds_address + cds_total); cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); +#endif } else { char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); allocate_metaspace_compressed_klass_ptrs(base, 0); } } -#endif +#endif // _LP64 // Initialize these before initializing the VirtualSpaceList _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; @@ -3305,19 +3353,29 @@ size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); assert(delta_bytes > 0, "Must be"); - size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); - - // capacity_until_GC might be updated concurrently, must calculate previous value. - size_t before_inc = after_inc - delta_bytes; - - tracer()->report_gc_threshold(before_inc, after_inc, - MetaspaceGCThresholdUpdater::ExpandAndAllocate); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT - " to " SIZE_FORMAT, before_inc, after_inc); + size_t before = 0; + size_t after = 0; + MetaWord* res; + bool incremented; + + // Each thread increments the HWM at most once. Even if the thread fails to increment + // the HWM, an allocation is still attempted. This is because another thread must then + // have incremented the HWM and therefore the allocation might still succeed. + do { + incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); + res = allocate(word_size, mdtype); + } while (!incremented && res == NULL); + + if (incremented) { + tracer()->report_gc_threshold(before, after, + MetaspaceGCThresholdUpdater::ExpandAndAllocate); + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT + " to " SIZE_FORMAT, before, after); + } } - return allocate(word_size, mdtype); + return res; } // Space allocated in the Metaspace. This may @@ -3366,6 +3424,10 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { if (SafepointSynchronize::is_at_safepoint()) { + if (DumpSharedSpaces && PrintSharedSpaces) { + record_deallocation(ptr, vsm()->get_raw_word_size(word_size)); + } + assert(Thread::current()->is_VM_thread(), "should be the VM thread"); // Don't take Heap_lock MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); @@ -3420,8 +3482,9 @@ if (result == NULL) { report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); } - - space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); + if (PrintSharedSpaces) { + space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); + } // Zero initialize. 
Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); @@ -3520,15 +3583,55 @@ void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { assert(DumpSharedSpaces, "sanity"); - AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize); + int byte_size = (int)word_size * HeapWordSize; + AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); + if (_alloc_record_head == NULL) { _alloc_record_head = _alloc_record_tail = rec; - } else { + } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { _alloc_record_tail->_next = rec; _alloc_record_tail = rec; + } else { + // slow linear search, but this doesn't happen that often, and only when dumping + for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { + if (old->_ptr == ptr) { + assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); + int remain_bytes = old->_byte_size - byte_size; + assert(remain_bytes >= 0, "sanity"); + old->_type = type; + + if (remain_bytes == 0) { + delete(rec); + } else { + address remain_ptr = address(ptr) + byte_size; + rec->_ptr = remain_ptr; + rec->_byte_size = remain_bytes; + rec->_type = MetaspaceObj::DeallocatedType; + rec->_next = old->_next; + old->_byte_size = byte_size; + old->_next = rec; + } + return; + } + } + assert(0, "reallocating a freed pointer that was not recorded"); } } +void Metaspace::record_deallocation(void* ptr, size_t word_size) { + assert(DumpSharedSpaces, "sanity"); + + for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { + if (rec->_ptr == ptr) { + assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity"); + rec->_type = MetaspaceObj::DeallocatedType; + return; + } + } + + assert(0, "deallocating a pointer that was not recorded"); +} + void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); --- ./hotspot/src/share/vm/memory/metaspace.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/metaspace.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -171,9 +171,10 @@ static const MetaspaceTracer* tracer() { return _tracer; } private: - // This is used by DumpSharedSpaces only, where only _vsm is used. So we will + // These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will // maintain a single list for now. 
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size); + void record_deallocation(void* ptr, size_t word_size); #ifdef _LP64 static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base); @@ -403,7 +404,9 @@ static void post_initialize(); static size_t capacity_until_GC(); - static size_t inc_capacity_until_GC(size_t v); + static bool inc_capacity_until_GC(size_t v, + size_t* new_cap_until_GC = NULL, + size_t* old_cap_until_GC = NULL); static size_t dec_capacity_until_GC(size_t v); static bool should_concurrent_collect() { return _should_concurrent_collect; } --- ./hotspot/src/share/vm/memory/metaspaceShared.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,8 +24,10 @@ #include "precompiled.hpp" #include "classfile/dictionary.hpp" +#include "classfile/classLoaderExt.hpp" #include "classfile/loaderConstraints.hpp" #include "classfile/placeholders.hpp" +#include "classfile/sharedClassUtil.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" @@ -38,6 +40,7 @@ #include "runtime/signature.hpp" #include "runtime/vm_operations.hpp" #include "runtime/vmThread.hpp" +#include "utilities/hashtable.hpp" #include "utilities/hashtable.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -46,6 +49,10 @@ ReservedSpace* MetaspaceShared::_shared_rs = NULL; +bool MetaspaceShared::_link_classes_made_progress; +bool MetaspaceShared::_check_classes_made_progress; +bool MetaspaceShared::_has_error_classes; +bool MetaspaceShared::_archive_loading_failed = false; // Read/write a data stream for restoring/preserving metadata pointers and // miscellaneous data from/to the shared archive file. @@ -445,6 +452,23 @@ SystemDictionary::classes_do(collect_classes); tty->print_cr("Number of classes %d", _global_klass_objects->length()); + { + int num_type_array = 0, num_obj_array = 0, num_inst = 0; + for (int i = 0; i < _global_klass_objects->length(); i++) { + Klass* k = _global_klass_objects->at(i); + if (k->oop_is_instance()) { + num_inst ++; + } else if (k->oop_is_objArray()) { + num_obj_array ++; + } else { + assert(k->oop_is_typeArray(), "sanity"); + num_type_array ++; + } + } + tty->print_cr(" instance classes = %5d", num_inst); + tty->print_cr(" obj array classes = %5d", num_obj_array); + tty->print_cr(" type array classes = %5d", num_type_array); + } // Update all the fingerprints in the shared methods. tty->print("Calculating fingerprints ... "); @@ -511,6 +535,8 @@ ClassLoader::copy_package_info_table(&md_top, md_end); ClassLoader::verify(); + ClassLoaderExt::copy_lookup_cache_to_archive(&md_top, md_end); + // Write the other data to the output array. WriteClosure wc(md_top, md_end); MetaspaceShared::serialize(&wc); @@ -611,38 +637,58 @@ #undef fmt_space } -static void link_shared_classes(Klass* obj, TRAPS) { + +void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) { Klass* k = obj; if (k->oop_is_instance()) { InstanceKlass* ik = (InstanceKlass*) k; // Link the class to cause the bytecodes to be rewritten and the - // cpcache to be created. - if (ik->init_state() < InstanceKlass::linked) { - ik->link_class(THREAD); - guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting"); + // cpcache to be created. Class verification is done according + // to -Xverify setting. 
+ _link_classes_made_progress |= try_link_class(ik, THREAD); + guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); + } +} + +void MetaspaceShared::check_one_shared_class(Klass* k) { + if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) { + _check_classes_made_progress = true; + } +} + +void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { + // We need to iterate because verification may cause additional classes + // to be loaded. + do { + _link_classes_made_progress = false; + SystemDictionary::classes_do(link_one_shared_class, THREAD); + guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); + } while (_link_classes_made_progress); + + if (_has_error_classes) { + // Mark all classes whose super class or interfaces failed verification. + do { + // Not completely sure if we need to do this iteratively. Anyway, + // we should come here only if there are unverifiable classes, which + // shouldn't happen in normal cases. So better safe than sorry. + _check_classes_made_progress = false; + SystemDictionary::classes_do(check_one_shared_class); + } while (_check_classes_made_progress); + + if (IgnoreUnverifiableClassesDuringDump) { + // This is useful when running JCK or SQE tests. You should not + // enable this when running real apps. + SystemDictionary::remove_classes_in_error_state(); + } else { + tty->print_cr("Please remove the unverifiable classes from your class list and try again"); + exit(1); } } } - -// Support for a simple checksum of the contents of the class list -// file to prevent trivial tampering. The algorithm matches that in -// the MakeClassList program used by the J2SE build process. -#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe)) -static jlong -jsum(jlong start, const char *buf, const int len) -{ - jlong h = start; - char *p = (char *)buf, *e = p + len; - while (p < e) { - char c = *p++; - if (c <= ' ') { - /* Skip spaces and control characters */ - continue; - } - h = 31 * h + c; - } - return h; +void MetaspaceShared::prepare_for_dumping() { + ClassLoader::initialize_shared_path(); + FileMapInfo::allocate_classpath_entry_table(); } // Preload classes from a list, populate the shared spaces and dump to a @@ -651,72 +697,114 @@ TraceTime timer("Dump Shared Spaces", TraceStartupTime); ResourceMark rm; + tty->print_cr("Allocated shared space: %d bytes at " PTR_FORMAT, + MetaspaceShared::shared_rs()->size(), + MetaspaceShared::shared_rs()->base()); + // Preload classes to be shared. // Should use some os:: method rather than fopen() here. aB. 
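
link_and_cleanup_shared_classes above iterates to a fixed point because linking one class can load further classes, and marking one class in error can newly invalidate classes that depend on it. Stripped to its control flow, the idiom looks like this (pass() is a hypothetical stand-in for one classes_do() sweep):

bool pass();  // hypothetical: one full sweep; true if any class changed state

void link_to_fixed_point() {
  // Re-run full sweeps until one completes without changing any state.
  // Terminates because classes only move toward the linked/error states.
  bool progress;
  do {
    progress = pass();
  } while (progress);
}
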
- // Construct the path to the class list (in jre/lib) - // Walk up two directories from the location of the VM and - // optionally tack on "lib" (depending on platform) - char class_list_path[JVM_MAXPATHLEN]; - os::jvm_path(class_list_path, sizeof(class_list_path)); - for (int i = 0; i < 3; i++) { - char *end = strrchr(class_list_path, *os::file_separator()); - if (end != NULL) *end = '\0'; + const char* class_list_path; + if (SharedClassListFile == NULL) { + // Construct the path to the class list (in jre/lib) + // Walk up two directories from the location of the VM and + // optionally tack on "lib" (depending on platform) + char class_list_path_str[JVM_MAXPATHLEN]; + os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); + for (int i = 0; i < 3; i++) { + char *end = strrchr(class_list_path_str, *os::file_separator()); + if (end != NULL) *end = '\0'; + } + int class_list_path_len = (int)strlen(class_list_path_str); + if (class_list_path_len >= 3) { + if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { + strcat(class_list_path_str, os::file_separator()); + strcat(class_list_path_str, "lib"); + } + } + strcat(class_list_path_str, os::file_separator()); + strcat(class_list_path_str, "classlist"); + class_list_path = class_list_path_str; + } else { + class_list_path = SharedClassListFile; } - int class_list_path_len = (int)strlen(class_list_path); - if (class_list_path_len >= 3) { - if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) { - strcat(class_list_path, os::file_separator()); - strcat(class_list_path, "lib"); - } + + int class_count = 0; + GrowableArray* class_promote_order = new GrowableArray(); + + // sun.io.Converters + static const char obj_array_sig[] = "[[Ljava/lang/Object;"; + SymbolTable::new_permanent_symbol(obj_array_sig, THREAD); + + // java.util.HashMap + static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;"; + SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD); + + tty->print_cr("Loading classes to share ..."); + _has_error_classes = false; + class_count += preload_and_dump(class_list_path, class_promote_order, + THREAD); + if (ExtraSharedClassListFile) { + class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order, + THREAD); } - strcat(class_list_path, os::file_separator()); - strcat(class_list_path, "classlist"); + tty->print_cr("Loading classes to share: done."); + ClassLoaderExt::init_lookup_cache(THREAD); + + if (PrintSharedSpaces) { + tty->print_cr("Shared spaces: preloaded %d classes", class_count); + } + + // Rewrite and link classes + tty->print_cr("Rewriting and linking classes ..."); + + // Link any classes which got missed. This would happen if we have loaded classes that + // were not explicitly specified in the classlist. E.g., if an interface implemented by class K + // fails verification, all other interfaces that were not specified in the classlist but + // are implemented by K are not verified. + link_and_cleanup_shared_classes(CATCH); + tty->print_cr("Rewriting and linking classes: done"); + + // Create and dump the shared spaces. Everything so far is loaded + // with the null class loader. + ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); + VM_PopulateDumpSharedSpace op(loader_data, class_promote_order); + VMThread::execute(&op); + + // Since various initialization steps have been undone by this process, + // it is not reasonable to continue running a java process. 
+ exit(0); +} + +int MetaspaceShared::preload_and_dump(const char * class_list_path, + GrowableArray* class_promote_order, + TRAPS) { FILE* file = fopen(class_list_path, "r"); + char class_name[256]; + int class_count = 0; + if (file != NULL) { - jlong computed_jsum = JSUM_SEED; - jlong file_jsum = 0; - - char class_name[256]; - int class_count = 0; - GrowableArray* class_promote_order = new GrowableArray(); - - // sun.io.Converters - static const char obj_array_sig[] = "[[Ljava/lang/Object;"; - SymbolTable::new_permanent_symbol(obj_array_sig, THREAD); - - // java.util.HashMap - static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;"; - SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD); - - tty->print("Loading classes to share ... "); while ((fgets(class_name, sizeof class_name, file)) != NULL) { - if (*class_name == '#') { - jint fsh, fsl; - if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) { - file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff); - } - + if (*class_name == '#') { // comment continue; } // Remove trailing newline size_t name_len = strlen(class_name); - class_name[name_len-1] = '\0'; - - computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1); + if (class_name[name_len-1] == '\n') { + class_name[name_len-1] = '\0'; + } // Got a class name - load it. TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD); guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol."); Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol, THREAD); - guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class."); + CLEAR_PENDING_EXCEPTION; if (klass != NULL) { if (PrintSharedSpaces && Verbose && WizardMode) { tty->print_cr("Shared spaces preloaded: %s", class_name); } - InstanceKlass* ik = InstanceKlass::cast(klass); // Should be class load order as per -XX:+TraceClassLoadingPreorder @@ -726,52 +814,15 @@ // cpcache to be created. The linking is done as soon as classes // are loaded in order that the related data structures (klass and // cpCache) are located together. - - if (ik->init_state() < InstanceKlass::linked) { - ik->link_class(THREAD); - guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting"); - } - - // TODO: Resolve klasses in constant pool - ik->constants()->resolve_class_constants(THREAD); + try_link_class(ik, THREAD); + guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class"); class_count++; } else { - if (PrintSharedSpaces && Verbose && WizardMode) { - tty->cr(); - tty->print_cr(" Preload failed: %s", class_name); - } + //tty->print_cr("Preload failed: %s", class_name); } - file_jsum = 0; // Checksum must be on last line of file } - if (computed_jsum != file_jsum) { - tty->cr(); - tty->print_cr("Preload failed: checksum of class list was incorrect."); - exit(1); - } - - tty->print_cr("done. "); - - if (PrintSharedSpaces) { - tty->print_cr("Shared spaces: preloaded %d classes", class_count); - } - - // Rewrite and unlink classes. - tty->print("Rewriting and linking classes ... "); - - // Link any classes which got missed. (It's not quite clear why - // they got missed.) This iteration would be unsafe if we weren't - // single-threaded at this point; however we can't do it on the VM - // thread because it requires object allocation. - SystemDictionary::classes_do(link_shared_classes, CATCH); - tty->print_cr("done. "); - - // Create and dump the shared spaces. Everything so far is loaded - // with the null class loader. 
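
With the checksum gone, the classlist is plain text: one class name per line, '#' introducing comments, and a trailing newline that may be absent on the last line. A self-contained sketch of that format handling, mirroring preload_and_dump and count_class (the function name is illustrative):

#include <cstdio>
#include <cstring>

int count_loadable_classes(const char* path) {
  char line[256];
  int count = 0;
  FILE* file = fopen(path, "r");
  if (file == NULL) return -1;
  while (fgets(line, sizeof line, file) != NULL) {
    if (line[0] == '#') continue;          // comment line
    size_t len = strlen(line);
    if (len > 0 && line[len - 1] == '\n') {
      line[len - 1] = '\0';                // tolerate a missing final newline
    }
    if (line[0] != '\0') count++;
  }
  fclose(file);
  return count;
}
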
- ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); - VM_PopulateDumpSharedSpace op(loader_data, class_promote_order); - VMThread::execute(&op); - + fclose(file); } else { char errmsg[JVM_MAXPATHLEN]; os::lasterror(errmsg, JVM_MAXPATHLEN); @@ -779,11 +830,39 @@ exit(1); } - // Since various initialization steps have been undone by this process, - // it is not reasonable to continue running a java process. - exit(0); + return class_count; } +// Returns true if the class's status has changed +bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { + assert(DumpSharedSpaces, "should only be called during dumping"); + if (ik->init_state() < InstanceKlass::linked) { + bool saved = BytecodeVerificationLocal; + if (!SharedClassUtil::is_shared_boot_class(ik)) { + // The verification decision is based on BytecodeVerificationRemote + // for non-system classes. Since we are using the NULL classloader + // to load non-system classes during dumping, we need to temporarily + // change BytecodeVerificationLocal to be the same as + // BytecodeVerificationRemote. Note this can cause the parent system + // classes also being verified. The extra overhead is acceptable during + // dumping. + BytecodeVerificationLocal = BytecodeVerificationRemote; + } + ik->link_class(THREAD); + if (HAS_PENDING_EXCEPTION) { + ResourceMark rm; + tty->print_cr("Preload Warning: Verification failed for %s", + ik->external_name()); + CLEAR_PENDING_EXCEPTION; + ik->set_in_error_state(); + _has_error_classes = true; + } + BytecodeVerificationLocal = saved; + return true; + } else { + return false; + } +} // Closure for serializing initialization data in from a data area // (ptr_array) read from the shared file. @@ -871,7 +950,8 @@ mapinfo->verify_region_checksum(md) && (_mc_base = mapinfo->map_region(mc)) != NULL && mapinfo->verify_region_checksum(mc) && - (image_alignment == (size_t)max_alignment())) { + (image_alignment == (size_t)max_alignment()) && + mapinfo->validate_classpath_entry_table()) { // Success (no need to do anything) return true; } else { @@ -887,8 +967,8 @@ #endif // If -Xshare:on is specified, print out the error message and exit VM, // otherwise, set UseSharedSpaces to false and continue. 
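
try_link_class below saves BytecodeVerificationLocal, forces it to match BytecodeVerificationRemote while the null loader links non-system classes, and restores it afterwards. The same save/restore can be written as an RAII guard; this FlagGuard is a hypothetical illustration, not a HotSpot class:

// Restores the flag on every exit path, which the inline
// save/restore in try_link_class must do by hand.
class FlagGuard {
  bool* _flag;
  bool  _saved;
 public:
  FlagGuard(bool* flag, bool value) : _flag(flag), _saved(*flag) { *flag = value; }
  ~FlagGuard() { *_flag = _saved; }
};

// Usage sketch:
//   FlagGuard g(&BytecodeVerificationLocal, BytecodeVerificationRemote);
//   ik->link_class(THREAD);
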
- if (RequireSharedSpaces) { - vm_exit_during_initialization("Unable to use shared archive.", NULL); + if (RequireSharedSpaces || PrintSharedArchiveAndExit) { + vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on."); } else { FLAG_SET_DEFAULT(UseSharedSpaces, false); } @@ -982,12 +1062,28 @@ buffer += sizeof(intptr_t); buffer += len; + buffer = ClassLoaderExt::restore_lookup_cache_from_archive(buffer); + intptr_t* array = (intptr_t*)buffer; ReadClosure rc(&array); serialize(&rc); // Close the mapinfo file mapinfo->close(); + + if (PrintSharedArchiveAndExit) { + if (PrintSharedDictionary) { + tty->print_cr("\nShared classes:\n"); + SystemDictionary::print_shared(false); + } + if (_archive_loading_failed) { + tty->print_cr("archive is invalid"); + vm_exit(1); + } else { + tty->print_cr("archive is valid"); + vm_exit(0); + } + } } // JVM/TI RedefineClasses() support: @@ -1003,3 +1099,49 @@ } return true; } + +int MetaspaceShared::count_class(const char* classlist_file) { + if (classlist_file == NULL) { + return 0; + } + char class_name[256]; + int class_count = 0; + FILE* file = fopen(classlist_file, "r"); + if (file != NULL) { + while ((fgets(class_name, sizeof class_name, file)) != NULL) { + if (*class_name == '#') { // comment + continue; + } + class_count++; + } + fclose(file); + } else { + char errmsg[JVM_MAXPATHLEN]; + os::lasterror(errmsg, JVM_MAXPATHLEN); + tty->print_cr("Loading classlist failed: %s", errmsg); + exit(1); + } + + return class_count; +} + +// the sizes are good for typical large applications that have a lot of shared +// classes +void MetaspaceShared::estimate_regions_size() { + int class_count = count_class(SharedClassListFile); + class_count += count_class(ExtraSharedClassListFile); + + if (class_count > LargeThresholdClassCount) { + if (class_count < HugeThresholdClassCount) { + SET_ESTIMATED_SIZE(Large, ReadOnly); + SET_ESTIMATED_SIZE(Large, ReadWrite); + SET_ESTIMATED_SIZE(Large, MiscData); + SET_ESTIMATED_SIZE(Large, MiscCode); + } else { + SET_ESTIMATED_SIZE(Huge, ReadOnly); + SET_ESTIMATED_SIZE(Huge, ReadWrite); + SET_ESTIMATED_SIZE(Huge, MiscData); + SET_ESTIMATED_SIZE(Huge, MiscCode); + } + } +} --- ./hotspot/src/share/vm/memory/metaspaceShared.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/metaspaceShared.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,6 +30,19 @@ #include "utilities/exceptions.hpp" #include "utilities/macros.hpp" +#define LargeSharedArchiveSize (300*M) +#define HugeSharedArchiveSize (800*M) +#define ReadOnlyRegionPercentage 0.39 +#define ReadWriteRegionPercentage 0.50 +#define MiscDataRegionPercentage 0.09 +#define MiscCodeRegionPercentage 0.02 +#define LargeThresholdClassCount 5000 +#define HugeThresholdClassCount 40000 + +#define SET_ESTIMATED_SIZE(type, region) \ + Shared ##region## Size = FLAG_IS_DEFAULT(Shared ##region## Size) ? \ + (uintx)(type ## SharedArchiveSize * region ## RegionPercentage) : Shared ## region ## Size + class FileMapInfo; // Class Data Sharing Support @@ -38,14 +51,22 @@ // CDS support static ReservedSpace* _shared_rs; static int _max_alignment; - + static bool _link_classes_made_progress; + static bool _check_classes_made_progress; + static bool _has_error_classes; + static bool _archive_loading_failed; public: enum { - vtbl_list_size = 17, // number of entries in the shared space vtable list. - num_virtuals = 200 // maximum number of virtual functions - // If virtual functions are added to Metadata, - // this number needs to be increased. 
Also, - // SharedMiscCodeSize will need to be increased. + vtbl_list_size = 17, // number of entries in the shared space vtable list. + num_virtuals = 200, // maximum number of virtual functions + // If virtual functions are added to Metadata, + // this number needs to be increased. Also, + // SharedMiscCodeSize will need to be increased. + // The following 2 sizes were based on + // MetaspaceShared::generate_vtable_methods() + vtbl_method_size = 16, // conservative size of the mov1 and jmp instructions + // for the x64 platform + vtbl_common_code_size = (1*K) // conservative size of the "common_code" for the x64 platform }; enum { @@ -67,7 +88,11 @@ NOT_CDS(return 0); } + static void prepare_for_dumping() NOT_CDS_RETURN; static void preload_and_dump(TRAPS) NOT_CDS_RETURN; + static int preload_and_dump(const char * class_list_path, + GrowableArray* class_promote_order, + TRAPS) NOT_CDS_RETURN; static ReservedSpace* shared_rs() { CDS_ONLY(return _shared_rs); @@ -78,6 +103,9 @@ CDS_ONLY(_shared_rs = rs;) } + static void set_archive_loading_failed() { + _archive_loading_failed = true; + } static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false); static void initialize_shared_spaces() NOT_CDS_RETURN; @@ -97,5 +125,13 @@ static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true); static void print_shared_spaces(); + + static bool try_link_class(InstanceKlass* ik, TRAPS); + static void link_one_shared_class(Klass* obj, TRAPS); + static void check_one_shared_class(Klass* obj); + static void link_and_cleanup_shared_classes(TRAPS); + + static int count_class(const char* classlist_file); + static void estimate_regions_size() NOT_CDS_RETURN; }; #endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP --- ./hotspot/src/share/vm/memory/referenceProcessor.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/referenceProcessor.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -191,7 +191,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer* gc_timer) { + GCTimer* gc_timer, + GCId gc_id) { NOT_PRODUCT(verify_ok_to_handle_reflists()); assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); @@ -213,7 +214,7 @@ // Soft references size_t soft_count = 0; { - GCTraceTime tt("SoftReference", trace_time, false, gc_timer); + GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id); soft_count = process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, is_alive, keep_alive, complete_gc, task_executor); @@ -224,7 +225,7 @@ // Weak references size_t weak_count = 0; { - GCTraceTime tt("WeakReference", trace_time, false, gc_timer); + GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id); weak_count = process_discovered_reflist(_discoveredWeakRefs, NULL, true, is_alive, keep_alive, complete_gc, task_executor); @@ -233,7 +234,7 @@ // Final references size_t final_count = 0; { - GCTraceTime tt("FinalReference", trace_time, false, gc_timer); + GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id); final_count = process_discovered_reflist(_discoveredFinalRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ -242,7 +243,7 @@ // Phantom references size_t phantom_count = 0; { - GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id); phantom_count = process_discovered_reflist(_discoveredPhantomRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ 
-261,7 +262,7 @@ // thus use JNI weak references to circumvent the phantom references and // resurrect a "post-mortem" object. { - GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id); if (task_executor != NULL) { task_executor->set_single_threaded_mode(); } @@ -1263,14 +1264,15 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer) { + GCTimer* gc_timer, + GCId gc_id) { NOT_PRODUCT(verify_ok_to_handle_reflists()); // Soft references { GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1283,7 +1285,7 @@ // Weak references { GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1296,7 +1298,7 @@ // Final references { GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1309,7 +1311,7 @@ // Phantom references { GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; --- ./hotspot/src/share/vm/memory/referenceProcessor.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/referenceProcessor.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/referencePolicy.hpp" #include "memory/referenceProcessorStats.hpp" #include "memory/referenceType.hpp" @@ -350,7 +351,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer); + GCTimer* gc_timer, + GCId gc_id); // Delete entries in the discovered lists that have // either a null referent or are not active. Such @@ -481,7 +483,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer *gc_timer); + GCTimer *gc_timer, + GCId gc_id); // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); --- ./hotspot/src/share/vm/memory/resourceArea.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/resourceArea.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,11 +49,11 @@ debug_only(static int _warned;) // to suppress multiple warnings public: - ResourceArea() { + ResourceArea() : Arena(mtThread) { debug_only(_nesting = 0;) } - ResourceArea(size_t init_size) : Arena(init_size) { + ResourceArea(size_t init_size) : Arena(mtThread, init_size) { debug_only(_nesting = 0;); } @@ -64,7 +64,7 @@ if (UseMallocOnly) { // use malloc, but save pointer in res. 
area for later freeing char** save = (char**)internal_malloc_4(sizeof(char*)); - return (*save = (char*)os::malloc(size, mtThread)); + return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC)); } #endif return (char*)Amalloc(size, alloc_failmode); --- ./hotspot/src/share/vm/memory/sharedHeap.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/sharedHeap.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,6 +29,7 @@ #include "gc_interface/collectedHeap.inline.hpp" #include "memory/sharedHeap.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/fprofiler.hpp" #include "runtime/java.hpp" #include "services/management.hpp" @@ -39,8 +40,8 @@ SharedHeap* SharedHeap::_sh; -// The set of potentially parallel tasks in strong root scanning. -enum SH_process_strong_roots_tasks { +// The set of potentially parallel tasks in root scanning. +enum SH_process_roots_tasks { SH_PS_Universe_oops_do, SH_PS_JNIHandles_oops_do, SH_PS_ObjectSynchronizer_oops_do, @@ -58,6 +59,7 @@ CollectedHeap(), _collector_policy(policy_), _rem_set(NULL), + _strong_roots_scope(NULL), _strong_roots_parity(0), _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)), _workers(NULL) @@ -114,6 +116,19 @@ static AssertNonScavengableClosure assert_is_non_scavengable_closure; #endif +SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const { + return _strong_roots_scope; +} +void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) { + assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active"); + assert(scope != NULL, "Illegal argument"); + _strong_roots_scope = scope; +} +void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) { + assert(_strong_roots_scope == scope, "Wrong scope unregistered"); + _strong_roots_scope = NULL; +} + void SharedHeap::change_strong_roots_parity() { // Also set the new collection parity. assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2, @@ -124,122 +139,173 @@ "Not in range."); } -SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate) - : MarkScope(activate) +SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate) + : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0) { if (_active) { - outer->change_strong_roots_parity(); + _sh->register_strong_roots_scope(this); + _sh->change_strong_roots_parity(); // Zero the claimed high water mark in the StringTable StringTable::clear_parallel_claimed_index(); } } SharedHeap::StrongRootsScope::~StrongRootsScope() { - // nothing particular + if (_active) { + _sh->unregister_strong_roots_scope(this); + } } -void SharedHeap::process_strong_roots(bool activate_scope, - bool is_scavenging, - ScanningOption so, - OopClosure* roots, - CodeBlobClosure* code_roots, - KlassClosure* klass_closure) { +Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false); + +void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) { + // The Thread work barrier is only needed by G1 Class Unloading. + // No need to use the barrier if this is single-threaded code. + if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) { + uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads); + if (new_value == n_workers) { + // This thread is last. Notify the others. 
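
The worker-done barrier being built here is one-shot: each worker bumps the counter once, the last one notifies, and the waiter sleeps until the count reaches n_workers. The same shape in self-contained C++ (a sketch only; std::mutex replaces the HotSpot Monitor, and the count is kept under the lock rather than bumped with Atomic::add):

#include <condition_variable>
#include <mutex>

class WorkerBarrier {
  std::mutex              _mu;
  std::condition_variable _cv;
  unsigned                _done;
 public:
  WorkerBarrier() : _done(0) {}

  void mark_done(unsigned n_workers) {
    std::lock_guard<std::mutex> lk(_mu);
    if (++_done == n_workers) {
      _cv.notify_all();          // this worker is last
    }
  }

  void wait_all(unsigned n_workers) {
    std::unique_lock<std::mutex> lk(_mu);
    _cv.wait(lk, [&] { return _done == n_workers; });
  }
};
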
+ MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + _lock->notify_all(); + } + } +} + +void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) { + assert(UseG1GC, "Currently only used by G1"); + assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading"); + + // No need to use the barrier if this is single-threaded code. + if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) { + MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + while ((uint)_n_workers_done_with_threads != n_workers) { + _lock->wait(Mutex::_no_safepoint_check_flag, 0, false); + } + } +} + +void SharedHeap::process_roots(bool activate_scope, + ScanningOption so, + OopClosure* strong_roots, + OopClosure* weak_roots, + CLDClosure* strong_cld_closure, + CLDClosure* weak_cld_closure, + CodeBlobClosure* code_roots) { StrongRootsScope srs(this, activate_scope); - // General strong roots. + // General roots. assert(_strong_roots_parity != 0, "must have called prologue code"); + assert(code_roots != NULL, "code root closure should always be set"); // _n_termination for _process_strong_tasks should be set up stream // in a method not running in a GC worker. Otherwise the GC worker // could be trying to change the termination condition while the task // is executing in another GC worker. + + // Iterating over the CLDG and the Threads are done early to allow G1 to + // first process the strong CLDs and nmethods and then, after a barrier, + // let the thread process the weak CLDs and nmethods. + + if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) { + ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure); + } + + // Some CLDs contained in the thread frames should be considered strong. + // Don't process them if they will be processed during the ClassLoaderDataGraph phase. + CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL; + // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway + CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; + + Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p); + + // This is the point where this worker thread will not find more strong CLDs/nmethods. + // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing. + active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads()); + if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) { - Universe::oops_do(roots); + Universe::oops_do(strong_roots); } // Global (strong) JNI handles if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) - JNIHandles::oops_do(roots); - - // All threads execute this; the individual threads are task groups. - CLDToOopClosure roots_from_clds(roots); - CLDToOopClosure* roots_from_clds_p = (is_scavenging ? 
NULL : &roots_from_clds); - if (CollectedHeap::use_parallel_gc_threads()) { - Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots); - } else { - Threads::oops_do(roots, roots_from_clds_p, code_roots); - } + JNIHandles::oops_do(strong_roots); if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) - ObjectSynchronizer::oops_do(roots); + ObjectSynchronizer::oops_do(strong_roots); if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) - FlatProfiler::oops_do(roots); + FlatProfiler::oops_do(strong_roots); if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do)) - Management::oops_do(roots); + Management::oops_do(strong_roots); if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do)) - JvmtiExport::oops_do(roots); + JvmtiExport::oops_do(strong_roots); if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) { - if (so & SO_AllClasses) { - SystemDictionary::oops_do(roots); - } else if (so & SO_SystemClasses) { - SystemDictionary::always_strong_oops_do(roots); - } else { - fatal("We should always have selected either SO_AllClasses or SO_SystemClasses"); - } - } - - if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) { - if (so & SO_AllClasses) { - ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging); - } else if (so & SO_SystemClasses) { - ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging); - } + SystemDictionary::roots_oops_do(strong_roots, weak_roots); } // All threads execute the following. A specific chunk of buckets // from the StringTable are the individual tasks. - if (so & SO_Strings) { + if (weak_roots != NULL) { if (CollectedHeap::use_parallel_gc_threads()) { - StringTable::possibly_parallel_oops_do(roots); + StringTable::possibly_parallel_oops_do(weak_roots); } else { - StringTable::oops_do(roots); + StringTable::oops_do(weak_roots); } } if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) { - if (so & SO_CodeCache) { + if (so & SO_ScavengeCodeCache) { assert(code_roots != NULL, "must supply closure for code cache"); - if (is_scavenging) { - // We only visit parts of the CodeCache when scavenging. - CodeCache::scavenge_root_nmethods_do(code_roots); - } else { - // CMSCollector uses this to do intermediate-strength collections. - // We scan the entire code cache, since CodeCache::do_unloading is not called. - CodeCache::blobs_do(code_roots); - } + // We only visit parts of the CodeCache when scavenging. + CodeCache::scavenge_root_nmethods_do(code_roots); + } + if (so & SO_AllCodeCache) { + assert(code_roots != NULL, "must supply closure for code cache"); + + // CMSCollector uses this to do intermediate-strength collections. + // We scan the entire code cache, since CodeCache::do_unloading is not called. + CodeCache::blobs_do(code_roots); } // Verify that the code cache contents are not subject to // movement by a scavenging collection. 
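
Throughout process_roots, is_task_claimed lets every worker run the same code while each root group is still processed exactly once: the first caller claims a task and later callers skip it. A reduced sketch of the claiming idiom with std::atomic (the real SubTasksDone adds bookkeeping such as all_tasks_completed):

#include <atomic>

enum { SH_PS_Universe_oops_do, SH_PS_JNIHandles_oops_do, SH_PS_NumElements };

static std::atomic<int> claimed[SH_PS_NumElements];  // zero-init: unclaimed

// Returns true if some other worker already owns task t.
bool is_task_claimed(int t) {
  return claimed[t].exchange(1) != 0;
}

// Every worker runs this; each branch body executes once in total.
void process_roots_worker() {
  if (!is_task_claimed(SH_PS_Universe_oops_do)) {
    // Universe::oops_do(strong_roots);
  }
  if (!is_task_claimed(SH_PS_JNIHandles_oops_do)) {
    // JNIHandles::oops_do(strong_roots);
  }
}
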
- DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false)); + DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations)); DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); } _process_strong_tasks->all_tasks_completed(); } +void SharedHeap::process_all_roots(bool activate_scope, + ScanningOption so, + OopClosure* roots, + CLDClosure* cld_closure, + CodeBlobClosure* code_closure) { + process_roots(activate_scope, so, + roots, roots, + cld_closure, cld_closure, + code_closure); +} + +void SharedHeap::process_strong_roots(bool activate_scope, + ScanningOption so, + OopClosure* roots, + CLDClosure* cld_closure, + CodeBlobClosure* code_closure) { + process_roots(activate_scope, so, + roots, NULL, + cld_closure, NULL, + code_closure); +} + + class AlwaysTrueClosure: public BoolObjectClosure { public: bool do_object_b(oop p) { return true; } }; static AlwaysTrueClosure always_true; -void SharedHeap::process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots) { +void SharedHeap::process_weak_roots(OopClosure* root_closure) { // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, root_closure); - - CodeCache::blobs_do(code_roots); - StringTable::oops_do(root_closure); } void SharedHeap::set_barrier_set(BarrierSet* bs) { --- ./hotspot/src/share/vm/memory/sharedHeap.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/sharedHeap.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,14 +69,10 @@ // number of active GC workers. CompactibleFreeListSpace and Space // have SequentialSubTasksDone's. // Example of using SubTasksDone and SequentialSubTasksDone -// G1CollectedHeap::g1_process_strong_roots() calls -// process_strong_roots(false, // no scoping; this is parallel code -// is_scavenging, so, -// &buf_scan_non_heap_roots, -// &eager_scan_code_roots); -// which delegates to SharedHeap::process_strong_roots() and uses +// G1CollectedHeap::g1_process_roots() +// to SharedHeap::process_roots() and uses // SubTasksDone* _process_strong_tasks to claim tasks. -// process_strong_roots() calls +// process_roots() calls // rem_set()->younger_refs_iterate() // to scan the card table and which eventually calls down into // CardTableModRefBS::par_non_clean_card_iterate_work(). This method @@ -163,9 +159,6 @@ // Iteration functions. void oop_iterate(ExtendedOopClosure* cl) = 0; - // Same as above, restricted to a memory region. - virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0; - // Iterate over all spaces in use in the heap, in an undefined order. virtual void space_iterate(SpaceClosure* cl) = 0; @@ -185,12 +178,12 @@ // task. (This also means that a parallel thread may only call // process_strong_roots once.) // - // For calls to process_strong_roots by sequential code, the parity is + // For calls to process_roots by sequential code, the parity is // updated automatically. // // The idea is that objects representing fine-grained tasks, such as // threads, will contain a "parity" field. A task will is claimed in the - // current "process_strong_roots" call only if its parity field is the + // current "process_roots" call only if its parity field is the // same as the "strong_roots_parity"; task claiming is accomplished by // updating the parity field to the strong_roots_parity with a CAS. 
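// The parity comment above can be made concrete with a small sketch
// (assumed names; std::atomic stands in for the CAS HotSpot performs with
// Atomic::cmpxchg): a per-task parity field is advanced to the current
// strong_roots_parity by at most one winner per pass, and zero stays
// reserved as the distinguished "never claimed" value.
#include <atomic>

struct ClaimableTask {
  std::atomic<int> _parity;        // 0 == never claimed
  ClaimableTask() : _parity(0) {}

  bool try_claim(int strong_roots_parity) {
    int seen = _parity.load(std::memory_order_relaxed);
    while (seen != strong_roots_parity) {
      if (_parity.compare_exchange_weak(seen, strong_roots_parity)) {
        return true;               // this thread claimed the task
      }
      // 'seen' was refreshed by the failed CAS; re-check and retry
    }
    return false;                  // already claimed for this pass
  }
};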
// @@ -201,27 +194,45 @@ // c) to never return a distinguished value (zero) with which such // task-claiming variables may be initialized, to indicate "never // claimed". - private: - void change_strong_roots_parity(); public: int strong_roots_parity() { return _strong_roots_parity; } - // Call these in sequential code around process_strong_roots. + // Call these in sequential code around process_roots. // strong_roots_prologue calls change_strong_roots_parity, if // parallel tasks are enabled. class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope { - public: - StrongRootsScope(SharedHeap* outer, bool activate = true); + // Used to implement the Thread work barrier. + static Monitor* _lock; + + SharedHeap* _sh; + volatile jint _n_workers_done_with_threads; + + public: + StrongRootsScope(SharedHeap* heap, bool activate = true); ~StrongRootsScope(); + + // Mark that this thread is done with the Threads work. + void mark_worker_done_with_threads(uint n_workers); + // Wait until all n_workers are done with the Threads work. + void wait_until_all_workers_done_with_threads(uint n_workers); }; friend class StrongRootsScope; + // The current active StrongRootScope + StrongRootsScope* _strong_roots_scope; + + StrongRootsScope* active_strong_roots_scope() const; + + private: + void register_strong_roots_scope(StrongRootsScope* scope); + void unregister_strong_roots_scope(StrongRootsScope* scope); + void change_strong_roots_parity(); + + public: enum ScanningOption { - SO_None = 0x0, - SO_AllClasses = 0x1, - SO_SystemClasses = 0x2, - SO_Strings = 0x4, - SO_CodeCache = 0x8 + SO_None = 0x0, + SO_AllCodeCache = 0x8, + SO_ScavengeCodeCache = 0x10 }; FlexibleWorkGang* workers() const { return _workers; } @@ -229,22 +240,29 @@ // Invoke the "do_oop" method the closure "roots" on all root locations. // The "so" argument determines which roots the closure is applied to: // "SO_None" does none; - // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; - // "SO_SystemClasses" to all the "system" classes and loaders; - // "SO_Strings" applies the closure to all entries in StringTable; - // "SO_CodeCache" applies the closure to all elements of the CodeCache. + // "SO_AllCodeCache" applies the closure to all elements of the CodeCache. + // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache. + void process_roots(bool activate_scope, + ScanningOption so, + OopClosure* strong_roots, + OopClosure* weak_roots, + CLDClosure* strong_cld_closure, + CLDClosure* weak_cld_closure, + CodeBlobClosure* code_roots); + void process_all_roots(bool activate_scope, + ScanningOption so, + OopClosure* roots, + CLDClosure* cld_closure, + CodeBlobClosure* code_roots); void process_strong_roots(bool activate_scope, - bool is_scavenging, ScanningOption so, OopClosure* roots, - CodeBlobClosure* code_roots, - KlassClosure* klass_closure); + CLDClosure* cld_closure, + CodeBlobClosure* code_roots); - // Apply "blk" to all the weak roots of the system. These include - // JNI weak roots, the code cache, system dictionary, symbol table, - // string table. - void process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots); + + // Apply "root_closure" to the JNI weak roots.. 
+ void process_weak_roots(OopClosure* root_closure); // The functions below are helper functions that a subclass of // "SharedHeap" can use in the implementation of its virtual @@ -257,7 +275,7 @@ virtual void gc_epilogue(bool full) = 0; // Sets the number of parallel threads that will be doing tasks - // (such as process strong roots) subsequently. + // (such as process roots) subsequently. virtual void set_par_threads(uint t); int n_termination(); @@ -274,4 +292,8 @@ size_t capacity); }; +inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) { + return static_cast(static_cast(so0) | static_cast(so1)); +} + #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP --- ./hotspot/src/share/vm/memory/space.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/space.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/liveRange.hpp" #include "gc_implementation/shared/markSweep.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "memory/blockOffsetTable.inline.hpp" #include "memory/defNewGeneration.hpp" #include "memory/genCollectedHeap.hpp" @@ -37,14 +38,13 @@ #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" #include "runtime/java.hpp" +#include "runtime/prefetch.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/safepoint.hpp" #include "utilities/copy.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" -void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } -void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } - PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, @@ -307,10 +307,6 @@ CompactibleSpace::clear(mangle_space); } -bool ContiguousSpace::is_in(const void* p) const { - return _bottom <= p && p < _top; -} - bool ContiguousSpace::is_free_block(const HeapWord* p) const { return p >= _top; } @@ -552,115 +548,11 @@ object_iterate(&blk2); } -HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) { - guarantee(false, "NYI"); - return bottom(); -} - -HeapWord* Space::object_iterate_careful_m(MemRegion mr, - ObjectClosureCareful* cl) { - guarantee(false, "NYI"); - return bottom(); -} - - -void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { - assert(!mr.is_empty(), "Should be non-empty"); - // We use MemRegion(bottom(), end()) rather than used_region() below - // because the two are not necessarily equal for some kinds of - // spaces, in particular, certain kinds of free list spaces. - // We could use the more complicated but more precise: - // MemRegion(used_region().start(), round_to(used_region().end(), CardSize)) - // but the slight imprecision seems acceptable in the assertion check. - assert(MemRegion(bottom(), end()).contains(mr), - "Should be within used space"); - HeapWord* prev = cl->previous(); // max address from last time - if (prev >= mr.end()) { // nothing to do - return; - } - // This assert will not work when we go from cms space to perm - // space, and use same closure. Easy fix deferred for later. 
XXX YSR - // assert(prev == NULL || contains(prev), "Should be within space"); - - bool last_was_obj_array = false; - HeapWord *blk_start_addr, *region_start_addr; - if (prev > mr.start()) { - region_start_addr = prev; - blk_start_addr = prev; - // The previous invocation may have pushed "prev" beyond the - // last allocated block yet there may be still be blocks - // in this region due to a particular coalescing policy. - // Relax the assertion so that the case where the unallocated - // block is maintained and "prev" is beyond the unallocated - // block does not cause the assertion to fire. - assert((BlockOffsetArrayUseUnallocatedBlock && - (!is_in(prev))) || - (blk_start_addr == block_start(region_start_addr)), "invariant"); - } else { - region_start_addr = mr.start(); - blk_start_addr = block_start(region_start_addr); - } - HeapWord* region_end_addr = mr.end(); - MemRegion derived_mr(region_start_addr, region_end_addr); - while (blk_start_addr < region_end_addr) { - const size_t size = block_size(blk_start_addr); - if (block_is_obj(blk_start_addr)) { - last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); - } else { - last_was_obj_array = false; - } - blk_start_addr += size; - } - if (!last_was_obj_array) { - assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), - "Should be within (closed) used space"); - assert(blk_start_addr > prev, "Invariant"); - cl->set_previous(blk_start_addr); // min address for next time - } -} - bool Space::obj_is_alive(const HeapWord* p) const { assert (block_is_obj(p), "The address should point to an object"); return true; } -void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { - assert(!mr.is_empty(), "Should be non-empty"); - assert(used_region().contains(mr), "Should be within used space"); - HeapWord* prev = cl->previous(); // max address from last time - if (prev >= mr.end()) { // nothing to do - return; - } - // See comment above (in more general method above) in case you - // happen to use this method. 
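// The iterators being removed above all share one traversal: find the
// first block at or below the region start, then advance block by block.
// A standalone sketch of that walk over a contiguous space (illustrative
// stand-ins only: Obj for oopDesc, size_of for oop(q)->size()):
#include <cstddef>

struct Obj;                                   // stand-in for oopDesc
typedef size_t (*SizeOf)(const Obj*);         // object size in bytes

template <typename Visitor>
void iterate_objects(char* bottom, char* top, SizeOf size_of, Visitor visit) {
  for (char* addr = bottom; addr < top; ) {
    Obj* obj = reinterpret_cast<Obj*>(addr);
    visit(obj);                    // do_object / do_object_bm in HotSpot
    addr += size_of(obj);          // objects are adjacent: no gaps to skip
  }
}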
- assert(prev == NULL || is_in_reserved(prev), "Should be within space"); - - bool last_was_obj_array = false; - HeapWord *obj_start_addr, *region_start_addr; - if (prev > mr.start()) { - region_start_addr = prev; - obj_start_addr = prev; - assert(obj_start_addr == block_start(region_start_addr), "invariant"); - } else { - region_start_addr = mr.start(); - obj_start_addr = block_start(region_start_addr); - } - HeapWord* region_end_addr = mr.end(); - MemRegion derived_mr(region_start_addr, region_end_addr); - while (obj_start_addr < region_end_addr) { - oop obj = oop(obj_start_addr); - const size_t size = obj->size(); - last_was_obj_array = cl->do_object_bm(obj, derived_mr); - obj_start_addr += size; - } - if (!last_was_obj_array) { - assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()), - "Should be within (closed) used space"); - assert(obj_start_addr > prev, "Invariant"); - cl->set_previous(obj_start_addr); // min address for next time - } -} - #if INCLUDE_ALL_GCS #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ @@ -688,43 +580,6 @@ } } -void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) { - if (is_empty()) { - return; - } - MemRegion cur = MemRegion(bottom(), top()); - mr = mr.intersection(cur); - if (mr.is_empty()) { - return; - } - if (mr.equals(cur)) { - oop_iterate(blk); - return; - } - assert(mr.end() <= top(), "just took an intersection above"); - HeapWord* obj_addr = block_start(mr.start()); - HeapWord* t = mr.end(); - - // Handle first object specially. - oop obj = oop(obj_addr); - SpaceMemRegionOopsIterClosure smr_blk(blk, mr); - obj_addr += obj->oop_iterate(&smr_blk); - while (obj_addr < t) { - oop obj = oop(obj_addr); - assert(obj->is_oop(), "expected an oop"); - obj_addr += obj->size(); - // If "obj_addr" is not greater than top, then the - // entire object "obj" is within the region. - if (obj_addr <= t) { - obj->oop_iterate(blk); - } else { - // "obj" extends beyond end of region - obj->oop_iterate(&smr_blk); - break; - } - }; -} - void ContiguousSpace::object_iterate(ObjectClosure* blk) { if (is_empty()) return; WaterMark bm = bottom_mark(); @@ -830,14 +685,8 @@ // This version requires locking. inline HeapWord* ContiguousSpace::allocate_impl(size_t size, HeapWord* const end_value) { - // In G1 there are places where a GC worker can allocates into a - // region using this serial allocation code without being prone to a - // race with other GC workers (we ensure that no other GC worker can - // access the same region at the same time). So the assert below is - // too strong in the case of G1. 
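// The serial allocate_impl below is bump-pointer allocation guarded by a
// lock, and its parallel twin (par_allocate_impl, whose retry loop closes
// just after) swaps the guard for a CAS on top. A compressed standalone
// model of the pair (a sketch: std::atomic approximates Atomic::cmpxchg_ptr,
// and sizes are in bytes rather than HeapWords):
#include <atomic>
#include <cstddef>

struct BumpSpace {
  std::atomic<char*> _top;
  char*              _end;

  char* allocate_locked(size_t bytes) {        // caller holds the lock
    char* obj = _top.load(std::memory_order_relaxed);
    if (static_cast<size_t>(_end - obj) < bytes) return 0;
    _top.store(obj + bytes, std::memory_order_relaxed);
    return obj;
  }

  char* par_allocate(size_t bytes) {           // lock-free retry loop
    char* obj = _top.load();
    do {
      if (static_cast<size_t>(_end - obj) < bytes) return 0;
    } while (!_top.compare_exchange_weak(obj, obj + bytes));
    return obj;
  }
};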
assert(Heap_lock->owned_by_self() || - (SafepointSynchronize::is_at_safepoint() && - (Thread::current()->is_VM_thread() || UseG1GC)), + (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked"); HeapWord* obj = top(); if (pointer_delta(end_value, obj) >= size) { @@ -871,6 +720,27 @@ } while (true); } +HeapWord* ContiguousSpace::allocate_aligned(size_t size) { + assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked"); + HeapWord* end_value = end(); + + HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes); + if (obj == NULL) { + return NULL; + } + + if (pointer_delta(end_value, obj) >= size) { + HeapWord* new_top = obj + size; + set_top(new_top); + assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top), + "checking alignment"); + return obj; + } else { + set_top(obj); + return NULL; + } +} + // Requires locking. HeapWord* ContiguousSpace::allocate(size_t size) { return allocate_impl(size, end()); --- ./hotspot/src/share/vm/memory/space.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/space.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,24 +33,8 @@ #include "memory/watermark.hpp" #include "oops/markOop.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/prefetch.hpp" #include "utilities/macros.hpp" #include "utilities/workgroup.hpp" -#ifdef TARGET_OS_FAMILY_linux -# include "os_linux.inline.hpp" -#endif -#ifdef TARGET_OS_FAMILY_solaris -# include "os_solaris.inline.hpp" -#endif -#ifdef TARGET_OS_FAMILY_windows -# include "os_windows.inline.hpp" -#endif -#ifdef TARGET_OS_FAMILY_aix -# include "os_aix.inline.hpp" -#endif -#ifdef TARGET_OS_FAMILY_bsd -# include "os_bsd.inline.hpp" -#endif // A space is an abstraction for the "storage units" backing // up the generation abstraction. It includes specific @@ -81,31 +65,6 @@ class CardTableRS; class DirtyCardToOopClosure; -// An oop closure that is circumscribed by a filtering memory region. -class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure { - private: - ExtendedOopClosure* _cl; - MemRegion _mr; - protected: - template void do_oop_work(T* p) { - if (_mr.contains(p)) { - _cl->do_oop(p); - } - } - public: - SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr): - _cl(cl), _mr(mr) {} - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); - virtual bool do_metadata() { - // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this. - assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen."); - return false; - } - virtual void do_klass(Klass* k) { ShouldNotReachHere(); } - virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); } -}; - // A Space describes a heap area. Class Space is an abstract // base class. // @@ -145,6 +104,12 @@ void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } + // Returns true if this object has been allocated since a + // generation's "save_marks" call. + virtual bool obj_allocated_since_save_marks(const oop obj) const { + return (HeapWord*)obj >= saved_mark_word(); + } + MemRegionClosure* preconsumptionDirtyCardClosure() const { return _preconsumptionDirtyCardClosure; } @@ -152,9 +117,9 @@ _preconsumptionDirtyCardClosure = cl; } - // Returns a subregion of the space containing all the objects in + // Returns a subregion of the space containing only the allocated objects in // the space. 
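// allocate_aligned above leans on CollectedHeap::align_allocation_or_fail;
// the arithmetic reduces to rounding top up to a power-of-two boundary and
// failing when even the padding overruns end (a sketch under that
// assumption; the real helper must also plug the skipped gap, e.g. with a
// filler object, so the heap stays walkable):
#include <cstdint>

inline char* align_up_or_fail(char* top, char* end, uintptr_t alignment) {
  uintptr_t p = reinterpret_cast<uintptr_t>(top);
  uintptr_t aligned = (p + alignment - 1) & ~(alignment - 1);   // round up
  if (aligned > reinterpret_cast<uintptr_t>(end)) {
    return 0;                      // not even room for the padding
  }
  return reinterpret_cast<char*>(aligned);
}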
- virtual MemRegion used_region() const { return MemRegion(bottom(), end()); } + virtual MemRegion used_region() const = 0; // Returns a region that is guaranteed to contain (at least) all objects // allocated at the time of the last call to "save_marks". If the space @@ -164,7 +129,7 @@ // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of // the space must distiguish between objects in the region allocated before // and after the call to save marks. - virtual MemRegion used_region_at_save_marks() const { + MemRegion used_region_at_save_marks() const { return MemRegion(bottom(), saved_mark_word()); } @@ -197,7 +162,9 @@ // expensive operation. To prevent performance problems // on account of its inadvertent use in product jvm's, // we restrict its use to assertion checks only. - virtual bool is_in(const void* p) const = 0; + bool is_in(const void* p) const { + return used_region().contains(p); + } // Returns true iff the given reserved memory of the space contains the // given address. @@ -221,11 +188,6 @@ // applications of the closure are not included in the iteration. virtual void oop_iterate(ExtendedOopClosure* cl); - // Same as above, restricted to the intersection of a memory region and - // the space. Fields in objects allocated by applications of the closure - // are not included in the iteration. - virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0; - // Iterate over all objects in the space, calling "cl.do_object" on // each. Objects allocated by applications of the closure are not // included in the iteration. @@ -234,24 +196,6 @@ // objects whose internal references point to objects in the space. virtual void safe_object_iterate(ObjectClosure* blk) = 0; - // Iterate over all objects that intersect with mr, calling "cl->do_object" - // on each. There is an exception to this: if this closure has already - // been invoked on an object, it may skip such objects in some cases. This is - // Most likely to happen in an "upwards" (ascending address) iteration of - // MemRegions. - virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); - - // Iterate over as many initialized objects in the space as possible, - // calling "cl.do_object_careful" on each. Return NULL if all objects - // in the space (at the start of the iteration) were iterated over. - // Return an address indicating the extent of the iteration in the - // event that the iteration had to return because of finding an - // uninitialized object in the space, or if the closure "cl" - // signalled early termination. - virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl); - virtual HeapWord* object_iterate_careful_m(MemRegion mr, - ObjectClosureCareful* cl); - // Create and return a new dirty card to oop closure. Can be // overriden to return the appropriate type of closure // depending on the type of space in which the closure will @@ -292,10 +236,6 @@ // Allocation (return NULL if full). Enforces mutual exclusion internally. virtual HeapWord* par_allocate(size_t word_size) = 0; - // Returns true if this object has been allocated since a - // generation's "save_marks" call. - virtual bool obj_allocated_since_save_marks(const oop obj) const = 0; - // Mark-sweep-compact support: all spaces can update pointers to objects // moving as a part of compaction. 
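// The SCAN_AND_FORWARD / SCAN_AND_ADJUST_POINTERS / SCAN_AND_COMPACT macros
// appearing below are the three passes of sliding compaction. A miniature,
// macro-free model over an array of fixed-size cells (live flags and
// forwarding indices replace mark words and forwardees; the
// pointer-adjustment pass is trivial here because toy cells hold no
// references):
#include <cstddef>
#include <vector>

struct Cell { bool live; int payload; };

// Pass 2 (forward): give every live cell its post-compaction slot.
std::vector<int> compute_forwarding(const std::vector<Cell>& heap) {
  std::vector<int> fwd(heap.size(), -1);
  int compact_top = 0;
  for (std::size_t q = 0; q < heap.size(); ++q) {
    if (heap[q].live) fwd[q] = compact_top++;
  }
  return fwd;
}

// Pass 4 (compact): slide survivors left to their forwarded slots.
void compact(std::vector<Cell>& heap, const std::vector<int>& fwd) {
  for (std::size_t q = 0; q < heap.size(); ++q) {
    if (heap[q].live) heap[fwd[q]] = heap[q];
  }
}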
virtual void adjust_pointers(); @@ -390,12 +330,11 @@ Generation* gen; CompactibleSpace* space; HeapWord* threshold; - CompactPoint(Generation* _gen, CompactibleSpace* _space, - HeapWord* _threshold) : - gen(_gen), space(_space), threshold(_threshold) {} + + CompactPoint(Generation* g = NULL) : + gen(g), space(NULL), threshold(0) {} }; - // A space that supports compaction operations. This is usually, but not // necessarily, a space that is normally contiguous. But, for example, a // free-list-based space whose normal collection is a mark-sweep without @@ -427,7 +366,7 @@ // Perform operations on the space needed after a compaction // has been performed. - virtual void reset_after_compaction() {} + virtual void reset_after_compaction() = 0; // Returns the next space (in the current generation) to be compacted in // the global compaction order. Also is used to select the next @@ -492,7 +431,7 @@ HeapWord* _end_of_live; // Minimum size of a free block. - virtual size_t minimum_free_block_size() const = 0; + virtual size_t minimum_free_block_size() const { return 0; } // This the function is invoked when an allocation of an object covering // "start" to "end occurs crosses the threshold; returns the next @@ -512,272 +451,6 @@ size_t word_len); }; -#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ - /* Compute the new addresses for the live objects and store it in the mark \ - * Used by universe::mark_sweep_phase2() \ - */ \ - HeapWord* compact_top; /* This is where we are currently compacting to. */ \ - \ - /* We're sure to be here before any objects are compacted into this \ - * space, so this is a good time to initialize this: \ - */ \ - set_compaction_top(bottom()); \ - \ - if (cp->space == NULL) { \ - assert(cp->gen != NULL, "need a generation"); \ - assert(cp->threshold == NULL, "just checking"); \ - assert(cp->gen->first_compaction_space() == this, "just checking"); \ - cp->space = cp->gen->first_compaction_space(); \ - compact_top = cp->space->bottom(); \ - cp->space->set_compaction_top(compact_top); \ - cp->threshold = cp->space->initialize_threshold(); \ - } else { \ - compact_top = cp->space->compaction_top(); \ - } \ - \ - /* We allow some amount of garbage towards the bottom of the space, so \ - * we don't start compacting before there is a significant gain to be made.\ - * Occasionally, we want to ensure a full compaction, which is determined \ - * by the MarkSweepAlwaysCompactCount parameter. \ - */ \ - uint invocations = MarkSweep::total_invocations(); \ - bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ - \ - size_t allowed_deadspace = 0; \ - if (skip_dead) { \ - const size_t ratio = allowed_dead_ratio(); \ - allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ - } \ - \ - HeapWord* q = bottom(); \ - HeapWord* t = scan_limit(); \ - \ - HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \ - live object. */ \ - HeapWord* first_dead = end();/* The first dead object. */ \ - LiveRange* liveRange = NULL; /* The current live range, recorded in the \ - first header of preceding free area. 
*/ \ - _first_dead = first_dead; \ - \ - const intx interval = PrefetchScanIntervalInBytes; \ - \ - while (q < t) { \ - assert(!block_is_obj(q) || \ - oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ - oop(q)->mark()->has_bias_pattern(), \ - "these are the only valid states during a mark sweep"); \ - if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ - /* prefetch beyond q */ \ - Prefetch::write(q, interval); \ - size_t size = block_size(q); \ - compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ - q += size; \ - end_of_live = q; \ - } else { \ - /* run over all the contiguous dead objects */ \ - HeapWord* end = q; \ - do { \ - /* prefetch beyond end */ \ - Prefetch::write(end, interval); \ - end += block_size(end); \ - } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ - \ - /* see if we might want to pretend this object is alive so that \ - * we don't have to compact quite as often. \ - */ \ - if (allowed_deadspace > 0 && q == compact_top) { \ - size_t sz = pointer_delta(end, q); \ - if (insert_deadspace(allowed_deadspace, q, sz)) { \ - compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ - q = end; \ - end_of_live = end; \ - continue; \ - } \ - } \ - \ - /* otherwise, it really is a free region. */ \ - \ - /* for the previous LiveRange, record the end of the live objects. */ \ - if (liveRange) { \ - liveRange->set_end(q); \ - } \ - \ - /* record the current LiveRange object. \ - * liveRange->start() is overlaid on the mark word. \ - */ \ - liveRange = (LiveRange*)q; \ - liveRange->set_start(end); \ - liveRange->set_end(end); \ - \ - /* see if this is the first dead region. */ \ - if (q < first_dead) { \ - first_dead = q; \ - } \ - \ - /* move on to the next object */ \ - q = end; \ - } \ - } \ - \ - assert(q == t, "just checking"); \ - if (liveRange != NULL) { \ - liveRange->set_end(q); \ - } \ - _end_of_live = end_of_live; \ - if (end_of_live < first_dead) { \ - first_dead = end_of_live; \ - } \ - _first_dead = first_dead; \ - \ - /* save the compaction_top of the compaction space. */ \ - cp->space->set_compaction_top(compact_top); \ -} - -#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ - /* adjust all the interior pointers to point at the new locations of objects \ - * Used by MarkSweep::mark_sweep_phase3() */ \ - \ - HeapWord* q = bottom(); \ - HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ - \ - assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ - \ - if (q < t && _first_dead > q && \ - !oop(q)->is_gc_marked()) { \ - /* we have a chunk of the space which hasn't moved and we've \ - * reinitialized the mark word during the previous pass, so we can't \ - * use is_gc_marked for the traversal. */ \ - HeapWord* end = _first_dead; \ - \ - while (q < end) { \ - /* I originally tried to conjoin "block_start(q) == q" to the \ - * assertion below, but that doesn't work, because you can't \ - * accurately traverse previous objects to get to the current one \ - * after their pointers have been \ - * updated, until the actual compaction is done. dld, 4/00 */ \ - assert(block_is_obj(q), \ - "should be at block boundaries, and should be looking at objs"); \ - \ - /* point all the oops to the new location */ \ - size_t size = oop(q)->adjust_pointers(); \ - size = adjust_obj_size(size); \ - \ - q += size; \ - } \ - \ - if (_first_dead == t) { \ - q = t; \ - } else { \ - /* $$$ This is funky. Using this to read the previously written \ - * LiveRange. See also use below. 
*/ \ - q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ - } \ - } \ - \ - const intx interval = PrefetchScanIntervalInBytes; \ - \ - debug_only(HeapWord* prev_q = NULL); \ - while (q < t) { \ - /* prefetch beyond q */ \ - Prefetch::write(q, interval); \ - if (oop(q)->is_gc_marked()) { \ - /* q is alive */ \ - /* point all the oops to the new location */ \ - size_t size = oop(q)->adjust_pointers(); \ - size = adjust_obj_size(size); \ - debug_only(prev_q = q); \ - q += size; \ - } else { \ - /* q is not a live object, so its mark should point at the next \ - * live object */ \ - debug_only(prev_q = q); \ - q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ - assert(q > prev_q, "we should be moving forward through memory"); \ - } \ - } \ - \ - assert(q == t, "just checking"); \ -} - -#define SCAN_AND_COMPACT(obj_size) { \ - /* Copy all live objects to their new location \ - * Used by MarkSweep::mark_sweep_phase4() */ \ - \ - HeapWord* q = bottom(); \ - HeapWord* const t = _end_of_live; \ - debug_only(HeapWord* prev_q = NULL); \ - \ - if (q < t && _first_dead > q && \ - !oop(q)->is_gc_marked()) { \ - debug_only( \ - /* we have a chunk of the space which hasn't moved and we've reinitialized \ - * the mark word during the previous pass, so we can't use is_gc_marked for \ - * the traversal. */ \ - HeapWord* const end = _first_dead; \ - \ - while (q < end) { \ - size_t size = obj_size(q); \ - assert(!oop(q)->is_gc_marked(), \ - "should be unmarked (special dense prefix handling)"); \ - debug_only(prev_q = q); \ - q += size; \ - } \ - ) /* debug_only */ \ - \ - if (_first_dead == t) { \ - q = t; \ - } else { \ - /* $$$ Funky */ \ - q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ - } \ - } \ - \ - const intx scan_interval = PrefetchScanIntervalInBytes; \ - const intx copy_interval = PrefetchCopyIntervalInBytes; \ - while (q < t) { \ - if (!oop(q)->is_gc_marked()) { \ - /* mark is pointer to next marked oop */ \ - debug_only(prev_q = q); \ - q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ - assert(q > prev_q, "we should be moving forward through memory"); \ - } else { \ - /* prefetch beyond q */ \ - Prefetch::read(q, scan_interval); \ - \ - /* size and destination */ \ - size_t size = obj_size(q); \ - HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ - \ - /* prefetch beyond compaction_top */ \ - Prefetch::write(compaction_top, copy_interval); \ - \ - /* copy object and reinit its mark */ \ - assert(q != compaction_top, "everything in this pass should be moving"); \ - Copy::aligned_conjoint_words(q, compaction_top, size); \ - oop(compaction_top)->init_mark(); \ - assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ - \ - debug_only(prev_q = q); \ - q += size; \ - } \ - } \ - \ - /* Let's remember if we were empty before we did the compaction. */ \ - bool was_empty = used_region().is_empty(); \ - /* Reset space after compaction is complete */ \ - reset_after_compaction(); \ - /* We do this clear, below, since it has overloaded meanings for some */ \ - /* space subtypes. For example, OffsetTableContigSpace's that were */ \ - /* compacted into will have had their offset table thresholds updated */ \ - /* continuously, but those that weren't need to have their thresholds */ \ - /* re-initialized. Also mangles unused area for debugging. 
*/ \ - if (used_region().is_empty()) { \ - if (!was_empty) clear(SpaceDecorator::Mangle); \ - } else { \ - if (ZapUnusedHeapArea) mangle_unused_area(); \ - } \ -} - class GenSpaceMangler; // A space in which the free area is contiguous. It therefore supports @@ -808,7 +481,7 @@ HeapWord* top() const { return _top; } void set_top(HeapWord* value) { _top = value; } - virtual void set_saved_mark() { _saved_mark_word = top(); } + void set_saved_mark() { _saved_mark_word = top(); } void reset_saved_mark() { _saved_mark_word = bottom(); } WaterMark bottom_mark() { return WaterMark(this, bottom()); } @@ -843,36 +516,31 @@ size_t used() const { return byte_size(bottom(), top()); } size_t free() const { return byte_size(top(), end()); } - // Override from space. - bool is_in(const void* p) const; - virtual bool is_free_block(const HeapWord* p) const; // In a contiguous space we have a more obvious bound on what parts // contain objects. MemRegion used_region() const { return MemRegion(bottom(), top()); } - MemRegion used_region_at_save_marks() const { - return MemRegion(bottom(), saved_mark_word()); - } - // Allocation (return NULL if full) virtual HeapWord* allocate(size_t word_size); virtual HeapWord* par_allocate(size_t word_size); - - virtual bool obj_allocated_since_save_marks(const oop obj) const { - return (HeapWord*)obj >= saved_mark_word(); - } + HeapWord* allocate_aligned(size_t word_size); // Iteration void oop_iterate(ExtendedOopClosure* cl); - void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); void object_iterate(ObjectClosure* blk); // For contiguous spaces this method will iterate safely over objects // in the space (i.e., between bottom and top) when at a safepoint. void safe_object_iterate(ObjectClosure* blk); - void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); - // iterates on objects up to the safe limit + + // Iterate over as many initialized objects in the space as possible, + // calling "cl.do_object_careful" on each. Return NULL if all objects + // in the space (at the start of the iteration) were iterated over. + // Return an address indicating the extent of the iteration in the + // event that the iteration had to return because of finding an + // uninitialized object in the space, or if the closure "cl" + // signaled early termination. HeapWord* object_iterate_careful(ObjectClosureCareful* cl); HeapWord* concurrent_iteration_safe_limit() { assert(_concurrent_iteration_safe_limit <= top(), @@ -903,7 +571,6 @@ // set new iteration safe limit set_concurrent_iteration_safe_limit(compaction_top()); } - virtual size_t minimum_free_block_size() const { return 0; } // Override. DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, --- ./hotspot/src/share/vm/memory/space.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/space.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,12 +28,279 @@ #include "gc_interface/collectedHeap.hpp" #include "memory/space.hpp" #include "memory/universe.hpp" +#include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" inline HeapWord* Space::block_start(const void* p) { return block_start_const(p); } +#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ + /* Compute the new addresses for the live objects and store it in the mark \ + * Used by universe::mark_sweep_phase2() \ + */ \ + HeapWord* compact_top; /* This is where we are currently compacting to. 
*/ \ + \ + /* We're sure to be here before any objects are compacted into this \ + * space, so this is a good time to initialize this: \ + */ \ + set_compaction_top(bottom()); \ + \ + if (cp->space == NULL) { \ + assert(cp->gen != NULL, "need a generation"); \ + assert(cp->threshold == NULL, "just checking"); \ + assert(cp->gen->first_compaction_space() == this, "just checking"); \ + cp->space = cp->gen->first_compaction_space(); \ + compact_top = cp->space->bottom(); \ + cp->space->set_compaction_top(compact_top); \ + cp->threshold = cp->space->initialize_threshold(); \ + } else { \ + compact_top = cp->space->compaction_top(); \ + } \ + \ + /* We allow some amount of garbage towards the bottom of the space, so \ + * we don't start compacting before there is a significant gain to be made.\ + * Occasionally, we want to ensure a full compaction, which is determined \ + * by the MarkSweepAlwaysCompactCount parameter. \ + */ \ + uint invocations = MarkSweep::total_invocations(); \ + bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ + \ + size_t allowed_deadspace = 0; \ + if (skip_dead) { \ + const size_t ratio = allowed_dead_ratio(); \ + allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ + } \ + \ + HeapWord* q = bottom(); \ + HeapWord* t = scan_limit(); \ + \ + HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \ + live object. */ \ + HeapWord* first_dead = end();/* The first dead object. */ \ + LiveRange* liveRange = NULL; /* The current live range, recorded in the \ + first header of preceding free area. */ \ + _first_dead = first_dead; \ + \ + const intx interval = PrefetchScanIntervalInBytes; \ + \ + while (q < t) { \ + assert(!block_is_obj(q) || \ + oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ + oop(q)->mark()->has_bias_pattern(), \ + "these are the only valid states during a mark sweep"); \ + if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ + /* prefetch beyond q */ \ + Prefetch::write(q, interval); \ + size_t size = block_size(q); \ + compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ + q += size; \ + end_of_live = q; \ + } else { \ + /* run over all the contiguous dead objects */ \ + HeapWord* end = q; \ + do { \ + /* prefetch beyond end */ \ + Prefetch::write(end, interval); \ + end += block_size(end); \ + } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ + \ + /* see if we might want to pretend this object is alive so that \ + * we don't have to compact quite as often. \ + */ \ + if (allowed_deadspace > 0 && q == compact_top) { \ + size_t sz = pointer_delta(end, q); \ + if (insert_deadspace(allowed_deadspace, q, sz)) { \ + compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ + q = end; \ + end_of_live = end; \ + continue; \ + } \ + } \ + \ + /* otherwise, it really is a free region. */ \ + \ + /* for the previous LiveRange, record the end of the live objects. */ \ + if (liveRange) { \ + liveRange->set_end(q); \ + } \ + \ + /* record the current LiveRange object. \ + * liveRange->start() is overlaid on the mark word. \ + */ \ + liveRange = (LiveRange*)q; \ + liveRange->set_start(end); \ + liveRange->set_end(end); \ + \ + /* see if this is the first dead region. 
*/ \ + if (q < first_dead) { \ + first_dead = q; \ + } \ + \ + /* move on to the next object */ \ + q = end; \ + } \ + } \ + \ + assert(q == t, "just checking"); \ + if (liveRange != NULL) { \ + liveRange->set_end(q); \ + } \ + _end_of_live = end_of_live; \ + if (end_of_live < first_dead) { \ + first_dead = end_of_live; \ + } \ + _first_dead = first_dead; \ + \ + /* save the compaction_top of the compaction space. */ \ + cp->space->set_compaction_top(compact_top); \ +} + +#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ + /* adjust all the interior pointers to point at the new locations of objects \ + * Used by MarkSweep::mark_sweep_phase3() */ \ + \ + HeapWord* q = bottom(); \ + HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ + \ + assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ + \ + if (q < t && _first_dead > q && \ + !oop(q)->is_gc_marked()) { \ + /* we have a chunk of the space which hasn't moved and we've \ + * reinitialized the mark word during the previous pass, so we can't \ + * use is_gc_marked for the traversal. */ \ + HeapWord* end = _first_dead; \ + \ + while (q < end) { \ + /* I originally tried to conjoin "block_start(q) == q" to the \ + * assertion below, but that doesn't work, because you can't \ + * accurately traverse previous objects to get to the current one \ + * after their pointers have been \ + * updated, until the actual compaction is done. dld, 4/00 */ \ + assert(block_is_obj(q), \ + "should be at block boundaries, and should be looking at objs"); \ + \ + /* point all the oops to the new location */ \ + size_t size = oop(q)->adjust_pointers(); \ + size = adjust_obj_size(size); \ + \ + q += size; \ + } \ + \ + if (_first_dead == t) { \ + q = t; \ + } else { \ + /* $$$ This is funky. Using this to read the previously written \ + * LiveRange. See also use below. */ \ + q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ + } \ + } \ + \ + const intx interval = PrefetchScanIntervalInBytes; \ + \ + debug_only(HeapWord* prev_q = NULL); \ + while (q < t) { \ + /* prefetch beyond q */ \ + Prefetch::write(q, interval); \ + if (oop(q)->is_gc_marked()) { \ + /* q is alive */ \ + /* point all the oops to the new location */ \ + size_t size = oop(q)->adjust_pointers(); \ + size = adjust_obj_size(size); \ + debug_only(prev_q = q); \ + q += size; \ + } else { \ + /* q is not a live object, so its mark should point at the next \ + * live object */ \ + debug_only(prev_q = q); \ + q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ + assert(q > prev_q, "we should be moving forward through memory"); \ + } \ + } \ + \ + assert(q == t, "just checking"); \ +} + +#define SCAN_AND_COMPACT(obj_size) { \ + /* Copy all live objects to their new location \ + * Used by MarkSweep::mark_sweep_phase4() */ \ + \ + HeapWord* q = bottom(); \ + HeapWord* const t = _end_of_live; \ + debug_only(HeapWord* prev_q = NULL); \ + \ + if (q < t && _first_dead > q && \ + !oop(q)->is_gc_marked()) { \ + debug_only( \ + /* we have a chunk of the space which hasn't moved and we've reinitialized \ + * the mark word during the previous pass, so we can't use is_gc_marked for \ + * the traversal. 
*/ \ + HeapWord* const end = _first_dead; \ + \ + while (q < end) { \ + size_t size = obj_size(q); \ + assert(!oop(q)->is_gc_marked(), \ + "should be unmarked (special dense prefix handling)"); \ + debug_only(prev_q = q); \ + q += size; \ + } \ + ) /* debug_only */ \ + \ + if (_first_dead == t) { \ + q = t; \ + } else { \ + /* $$$ Funky */ \ + q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ + } \ + } \ + \ + const intx scan_interval = PrefetchScanIntervalInBytes; \ + const intx copy_interval = PrefetchCopyIntervalInBytes; \ + while (q < t) { \ + if (!oop(q)->is_gc_marked()) { \ + /* mark is pointer to next marked oop */ \ + debug_only(prev_q = q); \ + q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ + assert(q > prev_q, "we should be moving forward through memory"); \ + } else { \ + /* prefetch beyond q */ \ + Prefetch::read(q, scan_interval); \ + \ + /* size and destination */ \ + size_t size = obj_size(q); \ + HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ + \ + /* prefetch beyond compaction_top */ \ + Prefetch::write(compaction_top, copy_interval); \ + \ + /* copy object and reinit its mark */ \ + assert(q != compaction_top, "everything in this pass should be moving"); \ + Copy::aligned_conjoint_words(q, compaction_top, size); \ + oop(compaction_top)->init_mark(); \ + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ + \ + debug_only(prev_q = q); \ + q += size; \ + } \ + } \ + \ + /* Let's remember if we were empty before we did the compaction. */ \ + bool was_empty = used_region().is_empty(); \ + /* Reset space after compaction is complete */ \ + reset_after_compaction(); \ + /* We do this clear, below, since it has overloaded meanings for some */ \ + /* space subtypes. For example, OffsetTableContigSpace's that were */ \ + /* compacted into will have had their offset table thresholds updated */ \ + /* continuously, but those that weren't need to have their thresholds */ \ + /* re-initialized. Also mangles unused area for debugging. 
*/ \ + if (used_region().is_empty()) { \ + if (!was_empty) clear(SpaceDecorator::Mangle); \ + } else { \ + if (ZapUnusedHeapArea) mangle_unused_area(); \ + } \ +} + inline HeapWord* OffsetTableContigSpace::allocate(size_t size) { HeapWord* res = ContiguousSpace::allocate(size); if (res != NULL) { --- ./hotspot/src/share/vm/memory/universe.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/universe.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,9 @@ #include "classfile/classLoader.hpp" #include "classfile/classLoaderData.hpp" #include "classfile/javaClasses.hpp" +#if INCLUDE_CDS +#include "classfile/sharedClassUtil.hpp" +#endif #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" @@ -34,6 +37,7 @@ #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/cardTableModRefBS.hpp" +#include "memory/filemap.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/genRemSet.hpp" @@ -74,7 +78,7 @@ #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" -#include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #endif // INCLUDE_ALL_GCS @@ -115,6 +119,7 @@ oop Universe::_out_of_memory_error_class_metaspace = NULL; oop Universe::_out_of_memory_error_array_size = NULL; oop Universe::_out_of_memory_error_gc_overhead_limit = NULL; +oop Universe::_out_of_memory_error_realloc_objects = NULL; objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL; volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0; bool Universe::_verify_in_progress = false; @@ -122,6 +127,8 @@ oop Universe::_arithmetic_exception_instance = NULL; oop Universe::_virtual_machine_error_instance = NULL; oop Universe::_vm_exception = NULL; +oop Universe::_allocation_context_notification_obj = NULL; + Method* Universe::_throw_illegal_access_error = NULL; Array* Universe::_the_empty_int_array = NULL; Array* Universe::_the_empty_short_array = NULL; @@ -184,6 +191,7 @@ f->do_oop((oop*)&_out_of_memory_error_class_metaspace); f->do_oop((oop*)&_out_of_memory_error_array_size); f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit); + f->do_oop((oop*)&_out_of_memory_error_realloc_objects); f->do_oop((oop*)&_preallocated_out_of_memory_error_array); f->do_oop((oop*)&_null_ptr_exception_instance); f->do_oop((oop*)&_arithmetic_exception_instance); @@ -191,6 +199,7 @@ f->do_oop((oop*)&_main_thread_group); f->do_oop((oop*)&_system_thread_group); f->do_oop((oop*)&_vm_exception); + f->do_oop((oop*)&_allocation_context_notification_obj); debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);) } @@ -238,8 +247,9 @@ void initialize_basic_type_klass(Klass* k, TRAPS) { Klass* ok = SystemDictionary::Object_klass(); if (UseSharedSpaces) { + ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); assert(k->super() == ok, "u3"); - k->restore_unshareable_info(CHECK); + k->restore_unshareable_info(loader_data, Handle(), CHECK); } else { k->initialize_supers(ok, CHECK); } @@ -566,7 +576,8 @@ (throwable() != Universe::_out_of_memory_error_metaspace) && (throwable() != Universe::_out_of_memory_error_class_metaspace) && (throwable() != Universe::_out_of_memory_error_array_size) 
&& - (throwable() != Universe::_out_of_memory_error_gc_overhead_limit)); + (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) && + (throwable() != Universe::_out_of_memory_error_realloc_objects)); } @@ -665,6 +676,10 @@ SymbolTable::create_table(); StringTable::create_table(); ClassLoader::create_package_info_table(); + + if (DumpSharedSpaces) { + MetaspaceShared::prepare_for_dumping(); + } } return JNI_OK; @@ -786,7 +801,7 @@ } else if (UseG1GC) { #if INCLUDE_ALL_GCS - G1CollectorPolicy* g1p = new G1CollectorPolicy(); + G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt(); g1p->initialize_all(); G1CollectedHeap* g1h = new G1CollectedHeap(g1p); Universe::_collectedHeap = g1h; @@ -1032,6 +1047,7 @@ Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); Universe::_out_of_memory_error_gc_overhead_limit = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false); // Setup preallocated NullPointerException // (this is currently used for a cheap & dirty solution in compiler exception handling) @@ -1071,6 +1087,9 @@ msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg()); + msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false); + java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg()); + msg = java_lang_String::create_from_str("/ by zero", CHECK_false); java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg()); @@ -1166,6 +1185,11 @@ MemoryService::add_metaspace_memory_pools(); MemoryService::set_universe_heap(Universe::_collectedHeap); +#if INCLUDE_CDS + if (UseSharedSpaces) { + SharedClassUtil::initialize(CHECK_false); + } +#endif return true; } --- ./hotspot/src/share/vm/memory/universe.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/memory/universe.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -157,6 +157,7 @@ static oop _out_of_memory_error_class_metaspace; static oop _out_of_memory_error_array_size; static oop _out_of_memory_error_gc_overhead_limit; + static oop _out_of_memory_error_realloc_objects; static Array* _the_empty_int_array; // Canonicalized int array static Array* _the_empty_short_array; // Canonicalized short array @@ -178,6 +179,8 @@ // the vm thread. static oop _vm_exception; + static oop _allocation_context_notification_obj; + // The particular choice of collected heap. 
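// The new _out_of_memory_error_realloc_objects above follows Universe's
// existing discipline: the error object is preallocated at bootstrap
// (throwing must not itself allocate on a full heap) and, crucially, every
// such static oop must be reported in Universe::oops_do, or a moving
// collection could relocate or reclaim it behind the VM's back. A sketch
// of that registration pattern (illustrative types, not the VM's):
struct OopClosureModel {
  virtual void do_oop(void** p) = 0;
  virtual ~OopClosureModel() {}
};

struct StaticRootsModel {
  void* _out_of_memory_error_gc_overhead_limit;
  void* _out_of_memory_error_realloc_objects;      // new field...

  void oops_do(OopClosureModel* f) {
    f->do_oop(&_out_of_memory_error_gc_overhead_limit);
    f->do_oop(&_out_of_memory_error_realloc_objects);  // ...new visit
  }
};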
  static CollectedHeap* _collectedHeap;
@@ -307,6 +310,10 @@
   static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
   static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; }
   static oop vm_exception() { return _vm_exception; }
+
+  static inline oop allocation_context_notification_obj();
+  static inline void set_allocation_context_notification_obj(oop obj);
+
   static Method* throw_illegal_access_error() { return _throw_illegal_access_error; }

   static Array<int>* the_empty_int_array() { return _the_empty_int_array; }
@@ -322,6 +329,7 @@
   static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); }
   static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); }
+  static oop out_of_memory_error_realloc_objects() { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects); }

   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr() { return &_boolArrayKlassObj; }
--- ./hotspot/src/share/vm/memory/universe.inline.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/memory/universe.inline.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -41,4 +41,12 @@
   return type == T_DOUBLE || type == T_LONG;
 }

+inline oop Universe::allocation_context_notification_obj() {
+  return _allocation_context_notification_obj;
+}
+
+inline void Universe::set_allocation_context_notification_obj(oop obj) {
+  _allocation_context_notification_obj = obj;
+}
+
 #endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
--- ./hotspot/src/share/vm/oops/arrayKlass.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/arrayKlass.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -100,7 +100,7 @@
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, Handle(NULL), CHECK);
+  java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), Handle(NULL), CHECK);
 }

 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
@@ -193,8 +193,9 @@
   set_component_mirror(NULL);
 }

-void ArrayKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   // Klass recreates the component mirror also
 }
--- ./hotspot/src/share/vm/oops/arrayKlass.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/arrayKlass.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -141,7 +141,7 @@
   // CDS support - remove and restore oops from metadata. Oops are not shared.
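// The restore_unshareable_info signature change threaded through above and
// below fits one pattern: metadata dumped into a CDS archive must shed any
// state that is only meaningful in a live VM, and rebuild it against the
// runtime's class loader data and protection domain when mapped back in. A
// rough standalone model (LoaderData is an illustrative stand-in for
// ClassLoaderData):
struct LoaderData;

struct SharedKlassModel {
  LoaderData* _loader;   // transient: never valid inside the archive

  void remove_unshareable_info() { _loader = 0; }           // before dump
  void restore_unshareable_info(LoaderData* loader_data) {  // after map-in
    _loader = loader_data;  // 8u40: the loader is now passed in explicitly
  }
};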
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);

   // Printing
   void print_on(outputStream* st) const;
--- ./hotspot/src/share/vm/oops/constantPool.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/constantPool.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -1817,11 +1817,22 @@

 void ConstantPool::set_on_stack(const bool value) {
   if (value) {
-    _flags |= _on_stack;
+    int old_flags = *const_cast<volatile int *>(&_flags);
+    while ((old_flags & _on_stack) == 0) {
+      int new_flags = old_flags | _on_stack;
+      int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+
+      if (result == old_flags) {
+        // Succeeded.
+        MetadataOnStackMark::record(this, Thread::current());
+        return;
+      }
+      old_flags = result;
+    }
   } else {
+    // Clearing is done single-threadedly.
     _flags &= ~_on_stack;
   }
-  if (value) MetadataOnStackMark::record(this);
 }

 // JSR 292 support for patching constant pool oops after the class is linked and
--- ./hotspot/src/share/vm/oops/cpCache.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/cpCache.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -33,6 +33,7 @@
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
--- ./hotspot/src/share/vm/oops/cpCache.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/cpCache.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -27,6 +27,7 @@

 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/array.hpp"

 class PSPromotionManager;
--- ./hotspot/src/share/vm/oops/instanceClassLoaderKlass.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/instanceClassLoaderKlass.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
@@ -44,12 +45,6 @@
 #include "oops/oop.pcgc.inline.hpp"
 #endif // INCLUDE_ALL_GCS

-#define if_do_metadata_checked(closure, nv_suffix) \
-  /* Make sure the non-virtual and the virtual versions match. */ \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-         "Inconsistency in do_metadata"); \
-  if (closure->do_metadata##nv_suffix())
-
 // Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for
 // all closures. Macros calling macros above for each oop size.
// Since ClassLoader objects have only a pointer to the loader_data, they are not --- ./hotspot/src/share/vm/oops/instanceKlass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/instanceKlass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -35,6 +35,7 @@ #include "jvmtifiles/jvmti.h" #include "memory/genOopClosures.inline.hpp" #include "memory/heapInspection.hpp" +#include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" #include "oops/fieldStreams.hpp" @@ -54,6 +55,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/thread.inline.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" @@ -64,7 +66,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" @@ -287,6 +289,7 @@ set_static_oop_field_count(0); set_nonstatic_field_size(0); set_is_marked_dependent(false); + set_has_unloaded_dependent(false); set_init_state(InstanceKlass::allocated); set_init_thread(NULL); set_reference_type(rt); @@ -501,6 +504,8 @@ oop InstanceKlass::init_lock() const { // return the init lock from the mirror oop lock = java_lang_Class::init_lock(java_mirror()); + // Prevent reordering with any access of initialization state + OrderAccess::loadload(); assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state "only fully initialized state can have a null lock"); return lock; @@ -775,6 +780,41 @@ } } +// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access) +void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_oop, TRAPS) { + if (this_oop->has_default_methods()) { + for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) { + Klass* iface = this_oop->local_interfaces()->at(i); + InstanceKlass* ik = InstanceKlass::cast(iface); + if (ik->should_be_initialized()) { + if (ik->has_default_methods()) { + ik->initialize_super_interfaces(ik, THREAD); + } + // Only initialize() interfaces that "declare" concrete methods. 
+ // has_default_methods drives searching superinterfaces since it + // means has_default_methods in its superinterface hierarchy + if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) { + ik->initialize(THREAD); + } + if (HAS_PENDING_EXCEPTION) { + Handle e(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; + { + EXCEPTION_MARK; + // Locks object, set state, and notify all waiting threads + this_oop->set_initialization_state_and_notify( + initialization_error, THREAD); + + // ignore any exception thrown, superclass initialization error is + // thrown below + CLEAR_PENDING_EXCEPTION; + } + THROW_OOP(e()); + } + } + } + } +} void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { // Make sure klass is linked (verified) before initialization @@ -854,33 +894,11 @@ } } + // Recursively initialize any superinterfaces that declare default methods + // Only need to recurse if has_default_methods which includes declaring and + // inheriting default methods if (this_oop->has_default_methods()) { - // Step 7.5: initialize any interfaces which have default methods - for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) { - Klass* iface = this_oop->local_interfaces()->at(i); - InstanceKlass* ik = InstanceKlass::cast(iface); - if (ik->has_default_methods() && ik->should_be_initialized()) { - ik->initialize(THREAD); - - if (HAS_PENDING_EXCEPTION) { - Handle e(THREAD, PENDING_EXCEPTION); - CLEAR_PENDING_EXCEPTION; - { - EXCEPTION_MARK; - // Locks object, set state, and notify all waiting threads - this_oop->set_initialization_state_and_notify( - initialization_error, THREAD); - - // ignore any exception thrown, superclass initialization error is - // thrown below - CLEAR_PENDING_EXCEPTION; - } - DTRACE_CLASSINIT_PROBE_WAIT( - super__failed, InstanceKlass::cast(this_oop()), -1, wait); - THROW_OOP(e()); - } - } - } + this_oop->initialize_super_interfaces(this_oop, CHECK); } // Step 8 @@ -1827,6 +1845,9 @@ return id; } +int nmethodBucket::decrement() { + return Atomic::add(-1, (volatile int *)&_count); +} // // Walk the list of dependent nmethods searching for nmethods which @@ -1841,7 +1862,7 @@ nmethod* nm = b->get_nmethod(); // since dependencies aren't removed until an nmethod becomes a zombie, // the dependency list may contain nmethods which aren't alive. - if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) { + if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) { if (TraceDependencies) { ResourceMark rm; tty->print_cr("Marked for deoptimization"); @@ -1858,6 +1879,43 @@ return found; } +void InstanceKlass::clean_dependent_nmethods() { + assert_locked_or_safepoint(CodeCache_lock); + + if (has_unloaded_dependent()) { + nmethodBucket* b = _dependencies; + nmethodBucket* last = NULL; + while (b != NULL) { + assert(b->count() >= 0, err_msg("bucket count: %d", b->count())); + + nmethodBucket* next = b->next(); + + if (b->count() == 0) { + if (last == NULL) { + _dependencies = next; + } else { + last->set_next(next); + } + delete b; + // last stays the same. 
+      } else {
+        last = b;
+      }
+
+      b = next;
+    }
+    set_has_unloaded_dependent(false);
+  }
+#ifdef ASSERT
+  else {
+    // Verification
+    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
+      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+      assert(b->count() != 0, "empty buckets need to be cleaned");
+    }
+  }
+#endif
+}

 //
 // Add an nmethodBucket to the list of dependencies for this nmethod.
@@ -1892,13 +1950,10 @@
   nmethodBucket* last = NULL;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
-      if (b->decrement() == 0) {
-        if (last == NULL) {
-          _dependencies = b->next();
-        } else {
-          last->set_next(b->next());
-        }
-        delete b;
+      int val = b->decrement();
+      guarantee(val >= 0, err_msg("Underflow: %d", val));
+      if (val == 0) {
+        set_has_unloaded_dependent(true);
       }
       return;
     }
@@ -1937,6 +1992,10 @@
   nmethodBucket* b = _dependencies;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
+#ifdef ASSERT
+      int count = b->count();
+      assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
+#endif
       return true;
     }
     b = b->next();
@@ -2141,12 +2200,6 @@
 // closure's do_metadata() method dictates whether the given closure should be
 // applied to the klass ptr in the object header.

-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-         "Inconsistency in do_metadata");                             \
-  if (closure->do_metadata##nv_suffix())
-
 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
                                                                              \
 int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
@@ -2170,10 +2223,9 @@
 int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,             \
                                                 OopClosureType* closure) {   \
   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
-  /* header */                                                               \
-  if_do_metadata_checked(closure, nv_suffix) {                               \
-    closure->do_klass##nv_suffix(obj->klass());                              \
-  }                                                                          \
+                                                                             \
+  assert_should_ignore_metadata(closure, nv_suffix);                         \
+                                                                             \
   /* instance variables */                                                   \
   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                     \
     obj,                                                                     \
@@ -2242,7 +2294,7 @@
 #endif // INCLUDE_ALL_GCS

 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
-  assert(is_loader_alive(is_alive), "this klass should be live");
+  assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
   if (is_interface()) {
     if (ClassUnloading) {
       Klass* impl = implementor();
@@ -2294,12 +2346,14 @@
   array_klasses_do(remove_unshareable_in_class);
 }

-void restore_unshareable_in_class(Klass* k, TRAPS) {
-  k->restore_unshareable_info(CHECK);
+static void restore_unshareable_in_class(Klass* k, TRAPS) {
+  // Array classes have null protection domain.
+  // --> see ArrayKlass::complete_create_array_klass()
+  k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
 }

-void InstanceKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   instanceKlassHandle ik(THREAD, this);

   Array<Method*>* methods = ik->methods();
@@ -2325,6 +2379,38 @@
   ik->array_klasses_do(restore_unshareable_in_class, CHECK);
 }

+// returns true IFF is_in_error_state() has been changed as a result of this call.
+bool InstanceKlass::check_sharing_error_state() {
+  assert(DumpSharedSpaces, "should only be called during dumping");
+  bool old_state = is_in_error_state();
+
+  if (!is_in_error_state()) {
+    bool bad = false;
+    for (InstanceKlass* sup = java_super(); sup; sup = sup->java_super()) {
+      if (sup->is_in_error_state()) {
+        bad = true;
+        break;
+      }
+    }
+    if (!bad) {
+      Array<Klass*>* interfaces = transitive_interfaces();
+      for (int i = 0; i < interfaces->length(); i++) {
+        Klass* iface = interfaces->at(i);
+        if (InstanceKlass::cast(iface)->is_in_error_state()) {
+          bad = true;
+          break;
+        }
+      }
+    }
+
+    if (bad) {
+      set_in_error_state();
+    }
+  }
+
+  return (old_state != is_in_error_state());
+}
+
 static void clear_all_breakpoints(Method* m) {
   m->clear_all_breakpoints();
 }
@@ -2814,6 +2900,22 @@
   OsrList_lock->unlock();
 }

+int InstanceKlass::mark_osr_nmethods(const Method* m) {
+  // This is a short non-blocking critical region, so the no safepoint check is ok.
+  MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* osr = osr_nmethods_head();
+  int found = 0;
+  while (osr != NULL) {
+    assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
+    if (osr->method() == m) {
+      osr->mark_for_deoptimization();
+      found++;
+    }
+    osr = osr->osr_link();
+  }
+  return found;
+}
+
 nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   OsrList_lock->lock_without_safepoint_check();
@@ -2855,28 +2957,27 @@
   return NULL;
 }

-void InstanceKlass::add_member_name(int index, Handle mem_name) {
+bool InstanceKlass::add_member_name(Handle mem_name) {
   jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
   MutexLocker ml(MemberNameTable_lock);
-  assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
   DEBUG_ONLY(No_Safepoint_Verifier nsv);

+  // Check if method has been redefined while taking out MemberNameTable_lock, if so
+  // return false. We cannot cache obsolete methods. They will crash when the function
+  // is called!
+ Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name()); + if (method->is_obsolete()) { + return false; + } else if (method->is_old()) { + // Replace method with redefined version + java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum())); + } + if (_member_names == NULL) { _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count()); } - _member_names->add_member_name(index, mem_name_wref); -} - -oop InstanceKlass::get_member_name(int index) { - MutexLocker ml(MemberNameTable_lock); - assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds"); - DEBUG_ONLY(No_Safepoint_Verifier nsv); - - if (_member_names == NULL) { - return NULL; - } - oop mem_name =_member_names->get_member_name(index); - return mem_name; + _member_names->add_member_name(mem_name_wref); + return true; } // ----------------------------------------------------------------------------------------------------- @@ -3051,8 +3152,7 @@ offset <= (juint) value->length() && offset + length <= (juint) value->length()) { st->print(BULLET"string: "); - Handle h_obj(obj); - java_lang_String::print(h_obj, st); + java_lang_String::print(obj, st); st->cr(); if (!WizardMode) return; // that is enough } --- ./hotspot/src/share/vm/oops/instanceKlass.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/instanceKlass.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -226,14 +226,16 @@ // _is_marked_dependent can be set concurrently, thus cannot be part of the // _misc_flags. bool _is_marked_dependent; // used for marking during flushing and deoptimization + bool _has_unloaded_dependent; enum { - _misc_rewritten = 1 << 0, // methods rewritten. - _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops - _misc_should_verify_class = 1 << 2, // allow caching of preverification - _misc_is_anonymous = 1 << 3, // has embedded _host_klass field - _misc_is_contended = 1 << 4, // marked with contended annotation - _misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods + _misc_rewritten = 1 << 0, // methods rewritten. 
+ _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops + _misc_should_verify_class = 1 << 2, // allow caching of preverification + _misc_is_anonymous = 1 << 3, // has embedded _host_klass field + _misc_is_contended = 1 << 4, // marked with contended annotation + _misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods + _misc_declares_default_methods = 1 << 6 // directly declares default methods (any access) }; u2 _misc_flags; u2 _minor_version; // minor version number of class file @@ -473,6 +475,9 @@ bool is_marked_dependent() const { return _is_marked_dependent; } void set_is_marked_dependent(bool value) { _is_marked_dependent = value; } + bool has_unloaded_dependent() const { return _has_unloaded_dependent; } + void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; } + // initialization (virtuals from Klass) bool should_be_initialized() const; // means that initialize should be called void initialize(TRAPS); @@ -682,6 +687,17 @@ } } + bool declares_default_methods() const { + return (_misc_flags & _misc_declares_default_methods) != 0; + } + void set_declares_default_methods(bool b) { + if (b) { + _misc_flags |= _misc_declares_default_methods; + } else { + _misc_flags &= ~_misc_declares_default_methods; + } + } + // for adding methods, ConstMethod::UNSET_IDNUM means no more ids available inline u2 next_method_idnum(); void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; } @@ -773,6 +789,7 @@ void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; }; void add_osr_nmethod(nmethod* n); void remove_osr_nmethod(nmethod* n); + int mark_osr_nmethods(const Method* m); nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const; // Breakpoint support (see methods on Method* for details) @@ -952,6 +969,7 @@ void clean_implementors_list(BoolObjectClosure* is_alive); void clean_method_data(BoolObjectClosure* is_alive); + void clean_dependent_nmethods(); // Explicit metaspace deallocation of fields // For RedefineClasses and class file parsing errors, we need to deallocate @@ -1005,6 +1023,13 @@ u2 idnum_allocated_count() const { return _idnum_allocated_count; } +public: + void set_in_error_state() { + assert(DumpSharedSpaces, "only call this when dumping archive"); + _init_state = initialization_error; + } + bool check_sharing_error_state(); + private: // initialization state #ifdef ASSERT @@ -1040,6 +1065,7 @@ static bool link_class_impl (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS); static bool verify_code (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS); static void initialize_impl (instanceKlassHandle this_oop, TRAPS); + static void initialize_super_interfaces (instanceKlassHandle this_oop, TRAPS); static void eager_initialize_impl (instanceKlassHandle this_oop); static void set_initialization_state_and_notify_impl (instanceKlassHandle this_oop, ClassState state, TRAPS); static void call_class_initializer_impl (instanceKlassHandle this_oop, TRAPS); @@ -1063,7 +1089,7 @@ public: // CDS support - remove and restore oops from metadata. Oops are not shared. 
virtual void remove_unshareable_info(); - virtual void restore_unshareable_info(TRAPS); + virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS); // jvm support jint compute_modifier_flags(TRAPS) const; @@ -1071,8 +1097,7 @@ // JSR-292 support MemberNameTable* member_names() { return _member_names; } void set_member_names(MemberNameTable* member_names) { _member_names = member_names; } - void add_member_name(int index, Handle member_name); - oop get_member_name(int index); + bool add_member_name(Handle member_name); public: // JVMTI support @@ -1240,7 +1265,7 @@ } int count() { return _count; } int increment() { _count += 1; return _count; } - int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; } + int decrement(); nmethodBucket* next() { return _next; } void set_next(nmethodBucket* b) { _next = b; } nmethod* get_nmethod() { return _nmethod; } --- ./hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceMirrorKlass.hpp" @@ -41,7 +42,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" @@ -241,12 +242,6 @@ return oop_size(obj); \ -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - // Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for // all closures. Macros calling macros above for each oop size. 
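The ConstantPool::set_on_stack() change earlier in this patch replaces a plain read-modify-write of _flags with a compare-and-swap loop, so racing threads cannot lose the _on_stack bit and MetadataOnStackMark::record() runs exactly once, only on the winning thread. Below is a minimal standalone sketch of that claim-a-bit pattern; std::atomic stands in for HotSpot's Atomic::cmpxchg, and all names are illustrative rather than HotSpot's own.

#include <atomic>
#include <cstdio>

static std::atomic<int> flags{0};
static const int ON_STACK = 1 << 0;

// Returns true only for the single thread that actually sets the bit.
static bool claim_on_stack() {
  int old_flags = flags.load(std::memory_order_relaxed);
  while ((old_flags & ON_STACK) == 0) {
    int new_flags = old_flags | ON_STACK;
    // On failure, compare_exchange refreshes old_flags with the observed
    // value, mirroring how Atomic::cmpxchg returns the current contents.
    if (flags.compare_exchange_weak(old_flags, new_flags)) {
      return true;  // this thread won; run the record-once side effect here
    }
  }
  return false;     // bit already set, or another thread won the race
}

int main() {
  if (claim_on_stack())  std::printf("recorded once\n");
  if (!claim_on_stack()) std::printf("already claimed\n");
  return 0;
}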
--- ./hotspot/src/share/vm/oops/instanceRefKlass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/instanceRefKlass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -38,7 +38,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" --- ./hotspot/src/share/vm/oops/klass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/klass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -36,11 +36,13 @@ #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline2.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "trace/traceMacros.hpp" #include "utilities/stack.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" @@ -167,7 +169,12 @@ _primary_supers[0] = k; set_super_check_offset(in_bytes(primary_supers_offset())); - set_java_mirror(NULL); + // The constructor is used from init_self_patching_vtbl_list, + // which doesn't zero out the memory before calling the constructor. + // Need to set the field explicitly to not hit an assert that the field + // should be NULL before setting it. + _java_mirror = NULL; + set_modifier_flags(0); set_layout_helper(Klass::_lh_neutral_value); set_name(NULL); @@ -186,6 +193,7 @@ // The klass doesn't have any references at this point. clear_modified_oops(); clear_accumulated_modified_oops(); + _shared_class_path_index = -1; } jint Klass::array_layout_helper(BasicType etype) { @@ -399,7 +407,7 @@ return mirror_alive; } -void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) { +void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) { if (!ClassUnloading) { return; } @@ -444,7 +452,7 @@ } // Clean the implementors list and method data. - if (current->oop_is_instance()) { + if (clean_alive_klasses && current->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(current); ik->clean_implementors_list(is_alive); ik->clean_method_data(is_alive); @@ -456,12 +464,18 @@ record_modified_oops(); } -void Klass::klass_update_barrier_set_pre(void* p, oop v) { - // This barrier used by G1, where it's used remember the old oop values, - // so that we don't forget any objects that were live at the snapshot at - // the beginning. This function is only used when we write oops into - // Klasses. Since the Klasses are used as roots in G1, we don't have to - // do anything here. +// This barrier is used by G1 to remember the old oop values, so +// that we don't forget any objects that were live at the snapshot at +// the beginning. This function is only used when we write oops into Klasses. 
+void Klass::klass_update_barrier_set_pre(oop* p, oop v) { +#if INCLUDE_ALL_GCS + if (UseG1GC) { + oop obj = *p; + if (obj != NULL) { + G1SATBCardTableModRefBS::enqueue(obj); + } + } +#endif } void Klass::klass_oop_store(oop* p, oop v) { @@ -472,7 +486,7 @@ if (always_do_update_barrier) { klass_oop_store((volatile oop*)p, v); } else { - klass_update_barrier_set_pre((void*)p, v); + klass_update_barrier_set_pre(p, v); *p = v; klass_update_barrier_set(v); } @@ -482,7 +496,7 @@ assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata"); assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object"); - klass_update_barrier_set_pre((void*)p, v); + klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile. OrderAccess::release_store_ptr(p, v); klass_update_barrier_set(v); } @@ -504,27 +518,25 @@ set_class_loader_data(NULL); } -void Klass::restore_unshareable_info(TRAPS) { +void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) { TRACE_INIT_ID(this); // If an exception happened during CDS restore, some of these fields may already be // set. We leave the class on the CLD list, even if incomplete so that we don't // modify the CLD list outside a safepoint. if (class_loader_data() == NULL) { - ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); - // Restore class_loader_data to the null class loader data + // Restore class_loader_data set_class_loader_data(loader_data); - // Add to null class loader list first before creating the mirror + // Add to class loader list first before creating the mirror // (same order as class file parsing) loader_data->add_class(this); } - // Recreate the class mirror. The protection_domain is always null for - // boot loader, for now. + // Recreate the class mirror. // Only recreate it if not present. A previous attempt to restore may have // gotten an OOM later but keep the mirror if it was created. if (java_mirror() == NULL) { - java_lang_Class::create_mirror(this, Handle(NULL), CHECK); + java_lang_Class::create_mirror(this, class_loader(), protection_domain, CHECK); } } @@ -707,3 +719,21 @@ } #endif + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TestKlass { + public: + static void test_oop_is_instanceClassLoader() { + assert(SystemDictionary::ClassLoader_klass()->oop_is_instanceClassLoader(), "assert"); + assert(!SystemDictionary::String_klass()->oop_is_instanceClassLoader(), "assert"); + } +}; + +void TestKlass_test() { + TestKlass::test_oop_is_instanceClassLoader(); +} + +#endif --- ./hotspot/src/share/vm/oops/klass.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/klass.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,7 +32,6 @@ #include "oops/klassPS.hpp" #include "oops/metadata.hpp" #include "oops/oop.hpp" -#include "runtime/orderAccess.hpp" #include "trace/traceMacros.hpp" #include "utilities/accessFlags.hpp" #include "utilities/macros.hpp" @@ -177,6 +176,16 @@ jbyte _modified_oops; // Card Table Equivalent (YC/CMS support) jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support) +private: + // This is an index into FileMapHeader::_classpath_entry_table[], to + // associate this class with the JAR file where it's loaded from during + // dump time. If a class is not loaded from the shared archive, this field is + // -1. 
+ jshort _shared_class_path_index; + + friend class SharedClassUtil; +protected: + // Constructor Klass(); @@ -283,6 +292,15 @@ void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; } bool has_accumulated_modified_oops() { return _accumulated_modified_oops == 1; } + int shared_classpath_index() const { + return _shared_class_path_index; + }; + + void set_shared_classpath_index(int index) { + _shared_class_path_index = index; + }; + + protected: // internal accessors Klass* subklass_oop() const { return _subklass; } Klass* next_sibling_oop() const { return _next_sibling; } @@ -455,7 +473,7 @@ public: // CDS support - remove and restore oops from metadata. Oops are not shared. virtual void remove_unshareable_info(); - virtual void restore_unshareable_info(TRAPS); + virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS); protected: // computes the subtype relationship @@ -502,6 +520,7 @@ virtual bool oop_is_objArray_slow() const { return false; } virtual bool oop_is_typeArray_slow() const { return false; } public: + virtual bool oop_is_instanceClassLoader() const { return false; } virtual bool oop_is_instanceMirror() const { return false; } virtual bool oop_is_instanceRef() const { return false; } @@ -585,36 +604,9 @@ // The is_alive closure passed in depends on the Garbage Collector used. bool is_loader_alive(BoolObjectClosure* is_alive); - static void clean_weak_klass_links(BoolObjectClosure* is_alive); - - // Prefetch within oop iterators. This is a macro because we - // can't guarantee that the compiler will inline it. In 64-bit - // it generally doesn't. Signature is - // - // static void prefetch_beyond(oop* const start, - // oop* const end, - // const intx foffset, - // const Prefetch::style pstyle); -#define prefetch_beyond(start, end, foffset, pstyle) { \ - const intx foffset_ = (foffset); \ - const Prefetch::style pstyle_ = (pstyle); \ - assert(foffset_ > 0, "prefetch beyond, not behind"); \ - if (pstyle_ != Prefetch::do_none) { \ - oop* ref = (start); \ - if (ref < (end)) { \ - switch (pstyle_) { \ - case Prefetch::do_read: \ - Prefetch::read(*ref, foffset_); \ - break; \ - case Prefetch::do_write: \ - Prefetch::write(*ref, foffset_); \ - break; \ - default: \ - ShouldNotReachHere(); \ - break; \ - } \ - } \ - } \ + static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true); + static void clean_subklass_tree(BoolObjectClosure* is_alive) { + clean_weak_klass_links(is_alive, false /* clean_alive_klasses */); } // iterators @@ -722,7 +714,7 @@ private: // barriers used by klass_oop_store void klass_update_barrier_set(oop v); - void klass_update_barrier_set_pre(void* p, oop v); + void klass_update_barrier_set_pre(oop* p, oop v); }; #endif // SHARE_VM_OOPS_KLASS_HPP --- ./hotspot/src/share/vm/oops/method.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/method.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -49,6 +49,7 @@ #include "runtime/compilationPolicy.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/relocator.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" @@ -92,7 +93,7 @@ set_hidden(false); set_dont_inline(false); set_method_data(NULL); - set_method_counters(NULL); + clear_method_counters(); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* @@ -116,7 +117,7 @@ MetadataFactory::free_metadata(loader_data, method_data()); 
set_method_data(NULL); MetadataFactory::free_metadata(loader_data, method_counters()); - set_method_counters(NULL); + clear_method_counters(); // The nmethod will be gone when we get here. if (code() != NULL) _code = NULL; } @@ -387,9 +388,7 @@ methodHandle mh(m); ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL); - if (mh->method_counters() == NULL) { - mh->set_method_counters(counters); - } else { + if (!mh->init_method_counters(counters)) { MetadataFactory::free_metadata(loader_data, counters); } return mh->method_counters(); @@ -559,6 +558,15 @@ return true; } +bool Method::is_constant_getter() const { + int last_index = code_size() - 1; + // Check if the first 1-3 bytecodes are a constant push + // and the last bytecode is a return. + return (2 <= code_size() && code_size() <= 4 && + Bytecodes::is_const(java_code_at(0)) && + Bytecodes::length_for(java_code_at(0)) == last_index && + Bytecodes::is_return(java_code_at(last_index))); +} bool Method::is_initializer() const { return name() == vmSymbols::object_initializer_name() || is_static_initializer(); @@ -730,8 +738,8 @@ } if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) { ttyLocker ttyl; - xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'", - is_osr ? "osr_" : "", os::current_thread_id()); + xtty->begin_elem("make_not_compilable thread='" UINTX_FORMAT "' osr='%d' level='%d'", + os::current_thread_id(), is_osr, comp_level); if (reason != NULL) { xtty->print(" reason=\'%s\'", reason); } @@ -851,7 +859,7 @@ assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?"); set_method_data(NULL); - set_method_counters(NULL); + clear_method_counters(); } // Called when the method_holder is getting linked. Setup entrypoints so the method @@ -1635,34 +1643,34 @@ } int Method::highest_comp_level() const { - const MethodData* mdo = method_data(); - if (mdo != NULL) { - return mdo->highest_comp_level(); + const MethodCounters* mcs = method_counters(); + if (mcs != NULL) { + return mcs->highest_comp_level(); } else { return CompLevel_none; } } int Method::highest_osr_comp_level() const { - const MethodData* mdo = method_data(); - if (mdo != NULL) { - return mdo->highest_osr_comp_level(); + const MethodCounters* mcs = method_counters(); + if (mcs != NULL) { + return mcs->highest_osr_comp_level(); } else { return CompLevel_none; } } void Method::set_highest_comp_level(int level) { - MethodData* mdo = method_data(); - if (mdo != NULL) { - mdo->set_highest_comp_level(level); + MethodCounters* mcs = method_counters(); + if (mcs != NULL) { + mcs->set_highest_comp_level(level); } } void Method::set_highest_osr_comp_level(int level) { - MethodData* mdo = method_data(); - if (mdo != NULL) { - mdo->set_highest_osr_comp_level(level); + MethodCounters* mcs = method_counters(); + if (mcs != NULL) { + mcs->set_highest_osr_comp_level(level); } } @@ -1864,9 +1872,12 @@ void Method::set_on_stack(const bool value) { // Set both the method itself and its constant pool. The constant pool // on stack means some method referring to it is also on the stack. - _access_flags.set_on_stack(value); constants()->set_on_stack(value); - if (value) MetadataOnStackMark::record(this); + + bool succeeded = _access_flags.set_on_stack(value); + if (value && succeeded) { + MetadataOnStackMark::record(this, Thread::current()); + } } // Called when the class loader is unloaded to make all methods weak. 
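In the method.cpp changes above, the release-store setter set_method_counters() is replaced by init_method_counters(), which installs the MethodCounters pointer with Atomic::cmpxchg_ptr: when several threads race through build_method_counters(), exactly one allocation is published and the losers free theirs. A minimal standalone sketch of this install-once idiom, with std::atomic standing in for cmpxchg_ptr (all names illustrative):

#include <atomic>

struct Counters { int invocations; Counters() : invocations(0) {} };

static std::atomic<Counters*> g_counters{nullptr};

static Counters* get_or_create_counters() {
  Counters* c = g_counters.load(std::memory_order_acquire);
  if (c != nullptr) return c;

  Counters* fresh = new Counters();
  Counters* expected = nullptr;
  // Publish only if the slot is still null, like
  // Atomic::cmpxchg_ptr(counters, &_method_counters, NULL) == NULL.
  if (g_counters.compare_exchange_strong(expected, fresh,
                                         std::memory_order_release,
                                         std::memory_order_acquire)) {
    return fresh;   // we won the race and published our allocation
  }
  delete fresh;     // another thread won; discard our copy
  return expected;  // 'expected' now holds the published pointer
}

int main() { return get_or_create_counters()->invocations; }

The release/acquire pair preserves the publication guarantee that the old release store provided, while additionally making the install race-safe.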
--- ./hotspot/src/share/vm/oops/method.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/method.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -233,10 +233,11 @@ // Tracking number of breakpoints, for fullspeed debugging. // Only mutated by VM thread. u2 number_of_breakpoints() const { - if (method_counters() == NULL) { + MethodCounters* mcs = method_counters(); + if (mcs == NULL) { return 0; } else { - return method_counters()->number_of_breakpoints(); + return mcs->number_of_breakpoints(); } } void incr_number_of_breakpoints(TRAPS) { @@ -253,8 +254,9 @@ } // Initialization only void clear_number_of_breakpoints() { - if (method_counters() != NULL) { - method_counters()->clear_number_of_breakpoints(); + MethodCounters* mcs = method_counters(); + if (mcs != NULL) { + mcs->clear_number_of_breakpoints(); } } @@ -301,10 +303,11 @@ } int interpreter_throwout_count() const { - if (method_counters() == NULL) { + MethodCounters* mcs = method_counters(); + if (mcs == NULL) { return 0; } else { - return method_counters()->interpreter_throwout_count(); + return mcs->interpreter_throwout_count(); } } @@ -362,11 +365,13 @@ return _method_counters; } - void set_method_counters(MethodCounters* counters) { - // The store into method must be released. On platforms without - // total store order (TSO) the reference may become visible before - // the initialization of data otherwise. - OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters); + void clear_method_counters() { + _method_counters = NULL; + } + + bool init_method_counters(MethodCounters* counters) { + // Try to install a pointer to MethodCounters, return true on success. + return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL; } #ifdef TIERED @@ -379,26 +384,28 @@ return method_counters()->interpreter_invocation_count(); } } - void set_prev_event_count(int count, TRAPS) { - MethodCounters* mcs = get_method_counters(CHECK); + void set_prev_event_count(int count) { + MethodCounters* mcs = method_counters(); if (mcs != NULL) { mcs->set_interpreter_invocation_count(count); } } jlong prev_time() const { - return method_counters() == NULL ? 0 : method_counters()->prev_time(); + MethodCounters* mcs = method_counters(); + return mcs == NULL ? 0 : mcs->prev_time(); } - void set_prev_time(jlong time, TRAPS) { - MethodCounters* mcs = get_method_counters(CHECK); + void set_prev_time(jlong time) { + MethodCounters* mcs = method_counters(); if (mcs != NULL) { mcs->set_prev_time(time); } } float rate() const { - return method_counters() == NULL ? 0 : method_counters()->rate(); + MethodCounters* mcs = method_counters(); + return mcs == NULL ? 0 : mcs->rate(); } - void set_rate(float rate, TRAPS) { - MethodCounters* mcs = get_method_counters(CHECK); + void set_rate(float rate) { + MethodCounters* mcs = method_counters(); if (mcs != NULL) { mcs->set_rate(rate); } @@ -416,9 +423,12 @@ static MethodCounters* build_method_counters(Method* m, TRAPS); int interpreter_invocation_count() { - if (TieredCompilation) return invocation_count(); - else return (method_counters() == NULL) ? 0 : - method_counters()->interpreter_invocation_count(); + if (TieredCompilation) { + return invocation_count(); + } else { + MethodCounters* mcs = method_counters(); + return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count(); + } } int increment_interpreter_invocation_count(TRAPS) { if (TieredCompilation) ShouldNotReachHere(); @@ -609,6 +619,9 @@ // returns true if the method is an accessor function (setter/getter). 
   bool is_accessor() const;

+  // returns true if the method does nothing but return a constant of primitive type
+  bool is_constant_getter() const;
+
   // returns true if the method is an initializer (<init> or <clinit>).
   bool is_initializer() const;

@@ -783,6 +796,10 @@
     return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
   }

+  int mark_osr_nmethods() {
+    return method_holder()->mark_osr_nmethods(this);
+  }
+
   nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
     return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
   }
--- ./hotspot/src/share/vm/oops/methodCounters.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/methodCounters.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -34,4 +34,40 @@
   backedge_counter()->reset();
   set_interpreter_throwout_count(0);
   set_interpreter_invocation_count(0);
+#ifdef TIERED
+  set_prev_time(0);
+  set_rate(0);
+  set_highest_comp_level(0);
+  set_highest_osr_comp_level(0);
+#endif
 }
+
+
+int MethodCounters::highest_comp_level() const {
+#ifdef TIERED
+  return _highest_comp_level;
+#else
+  return CompLevel_none;
+#endif
+}
+
+void MethodCounters::set_highest_comp_level(int level) {
+#ifdef TIERED
+  _highest_comp_level = level;
+#endif
+}
+
+int MethodCounters::highest_osr_comp_level() const {
+#ifdef TIERED
+  return _highest_osr_comp_level;
+#else
+  return CompLevel_none;
+#endif
+}
+
+void MethodCounters::set_highest_osr_comp_level(int level) {
+#ifdef TIERED
+  _highest_osr_comp_level = level;
+#endif
+}
+
--- ./hotspot/src/share/vm/oops/methodCounters.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/oops/methodCounters.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -39,6 +39,8 @@

 #ifdef TIERED
   float _rate;            // Events (invocation and backedge counter increments) per millisecond
+ u1 _highest_osr_comp_level; // Same for OSR level jlong _prev_time; // Previous time the rate was acquired #endif @@ -47,6 +49,8 @@ _number_of_breakpoints(0) #ifdef TIERED , _rate(0), + _highest_comp_level(0), + _highest_osr_comp_level(0), _prev_time(0) #endif { @@ -100,6 +104,11 @@ void set_rate(float rate) { _rate = rate; } #endif + int highest_comp_level() const; + void set_highest_comp_level(int level); + int highest_osr_comp_level() const; + void set_highest_osr_comp_level(int level); + // invocation counter InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; } --- ./hotspot/src/share/vm/oops/methodData.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/methodData.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -34,6 +34,7 @@ #include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/orderAccess.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -1152,9 +1153,7 @@ _backedge_counter_start = 0; _num_loops = 0; _num_blocks = 0; - _highest_comp_level = 0; - _highest_osr_comp_level = 0; - _would_profile = true; + _would_profile = unknown; #if INCLUDE_RTM_OPT _rtm_state = NoRTM; // No RTM lock eliding by default --- ./hotspot/src/share/vm/oops/methodData.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/methodData.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -2057,7 +2057,7 @@ // Whole-method sticky bits and flags enum { - _trap_hist_limit = 19, // decoupled from Deoptimization::Reason_LIMIT + _trap_hist_limit = 20, // decoupled from Deoptimization::Reason_LIMIT _trap_hist_mask = max_jubyte, _extra_data_count = 4 // extra DataLayout headers, for trap history }; // Public flag values @@ -2098,12 +2098,9 @@ // time with C1. It is used to determine if method is trivial. short _num_loops; short _num_blocks; - // Highest compile level this method has ever seen. - u1 _highest_comp_level; - // Same for OSR level - u1 _highest_osr_comp_level; // Does this method contain anything worth profiling? - bool _would_profile; + enum WouldProfile {unknown, no_profile, profile}; + WouldProfile _would_profile; // Size of _data array in bytes. (Excludes header and extra_data fields.) int _data_size; @@ -2272,13 +2269,8 @@ } #endif - void set_would_profile(bool p) { _would_profile = p; } - bool would_profile() const { return _would_profile; } - - int highest_comp_level() const { return _highest_comp_level; } - void set_highest_comp_level(int level) { _highest_comp_level = level; } - int highest_osr_comp_level() const { return _highest_osr_comp_level; } - void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; } + void set_would_profile(bool p) { _would_profile = p ? 
profile : no_profile; } + bool would_profile() const { return _would_profile != no_profile; } int num_loops() const { return _num_loops; } void set_num_loops(int n) { _num_loops = n; } --- ./hotspot/src/share/vm/oops/objArrayKlass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/objArrayKlass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,6 +29,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" @@ -42,6 +43,7 @@ #include "oops/symbol.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "utilities/copy.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS @@ -49,7 +51,7 @@ #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/parallelScavenge/psCompactionManager.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" @@ -475,12 +477,6 @@ } #endif // INCLUDE_ALL_GCS -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ --- ./hotspot/src/share/vm/oops/objArrayOop.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/objArrayOop.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -45,9 +45,10 @@ private: // Give size of objArrayOop in HeapWords minus the header static int array_size(int length) { - const int OopsPerHeapWord = HeapWordSize/heapOopSize; + const uint OopsPerHeapWord = HeapWordSize/heapOopSize; assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0), "Else the following (new) computation would be in error"); + uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; #ifdef ASSERT // The old code is left in for sanity-checking; it'll // go away pretty soon. XXX @@ -55,16 +56,15 @@ // oop->length() * HeapWordsPerOop; // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer. 
// The oop elements are aligned up to wordSize - const int HeapWordsPerOop = heapOopSize/HeapWordSize; - int old_res; + const uint HeapWordsPerOop = heapOopSize/HeapWordSize; + uint old_res; if (HeapWordsPerOop > 0) { old_res = length * HeapWordsPerOop; } else { - old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord; + old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord; } + assert(res == old_res, "Inconsistency between old and new."); #endif // ASSERT - int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; - assert(res == old_res, "Inconsistency between old and new."); return res; } --- ./hotspot/src/share/vm/oops/oop.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/oop.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -109,12 +109,13 @@ int size_given_klass(Klass* klass); // type test operations (inlined in oop.inline.h) - bool is_instance() const; - bool is_instanceMirror() const; - bool is_instanceRef() const; - bool is_array() const; - bool is_objArray() const; - bool is_typeArray() const; + bool is_instance() const; + bool is_instanceMirror() const; + bool is_instanceClassLoader() const; + bool is_instanceRef() const; + bool is_array() const; + bool is_objArray() const; + bool is_typeArray() const; private: // field addresses in oop --- ./hotspot/src/share/vm/oops/oop.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/oop.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -38,7 +38,8 @@ #include "oops/klass.inline.hpp" #include "oops/markOop.inline.hpp" #include "oops/oop.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "utilities/macros.hpp" #ifdef TARGET_ARCH_x86 @@ -147,12 +148,13 @@ inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); } -inline bool oopDesc::is_instance() const { return klass()->oop_is_instance(); } -inline bool oopDesc::is_instanceMirror() const { return klass()->oop_is_instanceMirror(); } -inline bool oopDesc::is_instanceRef() const { return klass()->oop_is_instanceRef(); } -inline bool oopDesc::is_array() const { return klass()->oop_is_array(); } -inline bool oopDesc::is_objArray() const { return klass()->oop_is_objArray(); } -inline bool oopDesc::is_typeArray() const { return klass()->oop_is_typeArray(); } +inline bool oopDesc::is_instance() const { return klass()->oop_is_instance(); } +inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); } +inline bool oopDesc::is_instanceMirror() const { return klass()->oop_is_instanceMirror(); } +inline bool oopDesc::is_instanceRef() const { return klass()->oop_is_instanceRef(); } +inline bool oopDesc::is_array() const { return klass()->oop_is_array(); } +inline bool oopDesc::is_objArray() const { return klass()->oop_is_objArray(); } +inline bool oopDesc::is_typeArray() const { return klass()->oop_is_typeArray(); } inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; } --- ./hotspot/src/share/vm/oops/oop.pcgc.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/oop.pcgc.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -54,8 +54,6 @@ klass()->oop_follow_contents(cm, this); } -// Used by parallel old GC. 
- inline oop oopDesc::forward_to_atomic(oop p) { assert(ParNewGeneration::is_legal_forward_ptr(p), "illegal forwarding pointer value."); --- ./hotspot/src/share/vm/oops/typeArrayKlass.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/typeArrayKlass.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -39,6 +39,7 @@ #include "oops/typeArrayKlass.hpp" #include "oops/typeArrayOop.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "utilities/macros.hpp" bool TypeArrayKlass::compute_is_subtype_of(Klass* k) { --- ./hotspot/src/share/vm/oops/typeArrayOop.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/oops/typeArrayOop.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -27,39 +27,7 @@ #include "oops/arrayOop.hpp" #include "oops/typeArrayKlass.hpp" -#ifdef TARGET_OS_ARCH_linux_x86 -# include "orderAccess_linux_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_sparc -# include "orderAccess_linux_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_zero -# include "orderAccess_linux_zero.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_x86 -# include "orderAccess_solaris_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_sparc -# include "orderAccess_solaris_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_windows_x86 -# include "orderAccess_windows_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_arm -# include "orderAccess_linux_arm.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_ppc -# include "orderAccess_linux_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_aix_ppc -# include "orderAccess_aix_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_x86 -# include "orderAccess_bsd_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_zero -# include "orderAccess_bsd_zero.inline.hpp" -#endif +#include "runtime/orderAccess.inline.hpp" // A typeArrayOop is an array containing basic types (non oop elements). 
// It is used for arrays of {characters, singles, doubles, bytes, shorts, integers, longs} @@ -182,7 +150,7 @@ DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh)); assert(length <= arrayOopDesc::max_array_length(etype), "no overflow"); - julong size_in_bytes = length; + julong size_in_bytes = (juint)length; size_in_bytes <<= element_shift; size_in_bytes += instance_header_size; julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize); --- ./hotspot/src/share/vm/opto/bytecodeInfo.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/bytecodeInfo.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -107,7 +107,7 @@ int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) { // Allows targeted inlining - if(callee_method->should_inline()) { + if (callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); if (C->print_inlining() && Verbose) { CompileTask::print_inline_indent(inline_level()); @@ -118,6 +118,12 @@ return true; } + if (callee_method->force_inline()) { + set_msg("force inline by annotation"); + _forced_inline = true; + return true; + } + #ifndef PRODUCT int inline_depth = inline_level()+1; if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) { @@ -244,6 +250,11 @@ } #endif + if (callee_method->force_inline()) { + set_msg("force inline by annotation"); + return false; + } + // Now perform checks which are heuristic if (is_unboxing_method(callee_method, C)) { @@ -251,12 +262,10 @@ return false; } - if (!callee_method->force_inline()) { - if (callee_method->has_compiled_code() && - callee_method->instructions_size() > InlineSmallCode) { - set_msg("already compiled into a big method"); - return true; - } + if (callee_method->has_compiled_code() && + callee_method->instructions_size() > InlineSmallCode) { + set_msg("already compiled into a big method"); + return true; } // don't inline exception code unless the top method belongs to an @@ -349,7 +358,7 @@ // Escape Analysis stress testing when running Xcomp or CTW: // inline constructors even if they are not reached. } else if (forced_inline()) { - // Inlining was forced by CompilerOracle or ciReplay + // Inlining was forced by CompilerOracle, ciReplay or annotation } else if (profile.count() == 0) { // don't inline unreached call sites set_msg("call site not reached"); --- ./hotspot/src/share/vm/opto/c2_globals.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/c2_globals.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -473,6 +473,9 @@ product(bool, DoEscapeAnalysis, true, \ "Perform escape analysis") \ \ + product(double, EscapeAnalysisTimeout, 20. 
DEBUG_ONLY(+40.), \ + "Abort EA when it reaches time limit (in sec)") \ + \ develop(bool, ExitEscapeAnalysisOnTimeout, true, \ "Exit or throw assert in EA when it reaches time limit") \ \ @@ -644,7 +647,7 @@ develop(bool, AlwaysIncrementalInline, false, \ "do all inlining incrementally") \ \ - product(intx, LiveNodeCountInliningCutoff, 20000, \ + product(intx, LiveNodeCountInliningCutoff, 40000, \ "max number of live nodes in a method") \ \ diagnostic(bool, OptimizeExpensiveOps, true, \ @@ -653,8 +656,8 @@ product(bool, UseMathExactIntrinsics, true, \ "Enables intrinsification of various java.lang.Math functions") \ \ - experimental(bool, ReplaceInParentMaps, false, \ - "Propagate type improvements in callers of inlinee if possible") \ + product(bool, UseMultiplyToLenIntrinsic, false, \ + "Enables intrinsification of BigInteger.multiplyToLen()") \ \ product(bool, UseTypeSpeculation, true, \ "Speculatively propagate types from profiles") \ --- ./hotspot/src/share/vm/opto/callGenerator.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/callGenerator.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -63,12 +63,12 @@ } virtual bool is_parse() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); int is_osr() { return _is_osr; } }; -JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* ParseGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); if (is_osr()) { @@ -80,7 +80,7 @@ return NULL; // bailing out of the compile; do not try to parse } - Parse parser(jvms, method(), _expected_uses, parent_parser); + Parse parser(jvms, method(), _expected_uses); // Grab signature for matching/allocation #ifdef ASSERT if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { @@ -119,12 +119,12 @@ _separate_io_proj(separate_io_proj) { } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); CallStaticJavaNode* call_node() const { return _call_node; } }; -JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* DirectCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() @@ -171,10 +171,10 @@ vtable_index >= 0, "either invalid or usable"); } virtual bool is_virtual() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; -JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* VirtualCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); Node* receiver = kit.argument(0); @@ -276,7 +276,7 @@ // Convert the CallStaticJava into an inline virtual void do_late_inline(); - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); @@ -290,7 +290,7 @@ // that the late inlining logic can distinguish between fall // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. 
- return DirectCallGenerator::generate(jvms, parent_parser); + return DirectCallGenerator::generate(jvms); } virtual void print_inlining_late(const char* msg) { @@ -389,7 +389,7 @@ } // Now perform the inling using the synthesized JVMState - JVMState* new_jvms = _inline_cg->generate(jvms, NULL); + JVMState* new_jvms = _inline_cg->generate(jvms); if (new_jvms == NULL) return; // no change if (C->failing()) return; @@ -407,7 +407,7 @@ C->env()->notice_inlined_method(_inline_cg->method()); C->set_inlining_progress(true); - kit.replace_call(call, result); + kit.replace_call(call, result, true); } @@ -429,8 +429,8 @@ virtual bool is_mh_late_inline() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { - JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser); + virtual JVMState* generate(JVMState* jvms) { + JVMState* new_jvms = LateInlineCallGenerator::generate(jvms); if (_input_not_const) { // inlining won't be possible so no need to enqueue right now. call_node()->set_generator(this); @@ -477,13 +477,13 @@ LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_string_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); + JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } @@ -500,13 +500,13 @@ LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_boxing_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); + JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } }; @@ -542,7 +542,7 @@ virtual bool is_virtual() const { return _is_virtual; } virtual bool is_deferred() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -552,12 +552,12 @@ return new WarmCallGenerator(ci, if_cold, if_hot); } -JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* WarmCallGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); if (C->log() != NULL) { C->log()->elem("warm_call bci='%d'", jvms->bci()); } - jvms = _if_cold->generate(jvms, parent_parser); + jvms = _if_cold->generate(jvms); if (jvms != NULL) { Node* m = jvms->map()->control(); if (m->is_CatchProj()) m = m->in(0); else m = C->top(); @@ -618,7 +618,7 @@ virtual bool is_inline() const { return _if_hit->is_inline(); } virtual bool is_deferred() const { return _if_hit->is_deferred(); } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -630,7 +630,7 @@ } -JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* PredictedCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. 
@@ -648,6 +648,10 @@ return kit.transfer_exceptions_into_jvms(); } + // Make a copy of the replaced nodes in case we need to restore them + ReplacedNodes replaced_nodes = kit.map()->replaced_nodes(); + replaced_nodes.clone(); + Node* exact_receiver = receiver; // will get updated in place... Node* slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob, @@ -658,7 +662,7 @@ { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { - slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser); + slow_jvms = _if_missed->generate(kit.sync_jvms()); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); @@ -679,12 +683,12 @@ kit.replace_in_map(receiver, exact_receiver); // Make the hot call: - JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser); + JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); if (new_jvms == NULL) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); - new_jvms = cg->generate(kit.sync_jvms(), parent_parser); + new_jvms = cg->generate(kit.sync_jvms()); } kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); @@ -701,6 +705,11 @@ return kit.transfer_exceptions_into_jvms(); } + // There are 2 branches and the replaced nodes are only valid on + // one: restore the replaced nodes to what they were before the + // branch. + kit.map()->set_replaced_nodes(replaced_nodes); + // Finish the diamond. kit.C->set_has_split_ifs(true); // Has chance for split-if optimization RegionNode* region = new (kit.C) RegionNode(3); @@ -710,7 +719,15 @@ Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); iophi->set_req(2, slow_map->i_o()); kit.set_i_o(gvn.transform(iophi)); + // Merge memory kit.merge_memory(slow_map->merged_memory(), region, 2); + // Transform new memory Phis. + for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) { + Node* phi = mms.memory(); + if (phi->is_Phi() && phi->in(0) == region) { + mms.set_memory(gvn.transform(phi)); + } + } uint tos = kit.jvms()->stkoff() + kit.sp(); uint limit = slow_map->req(); for (uint i = TypeFunc::Parms; i < limit; i++) { @@ -845,7 +862,7 @@ call_does_dispatch, vtable_index); // out-parameters // We lack profiling at this call but type speculation may // provide us with a type - speculative_receiver_type = receiver_type->speculative_type(); + speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL; } CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true); @@ -864,15 +881,15 @@ } -//------------------------PredictedIntrinsicGenerator------------------------------ -// Internal class which handles all predicted Intrinsic calls. -class PredictedIntrinsicGenerator : public CallGenerator { +//------------------------PredicatedIntrinsicGenerator------------------------------ +// Internal class which handles all predicated Intrinsic calls. 
+class PredicatedIntrinsicGenerator : public CallGenerator { CallGenerator* _intrinsic; CallGenerator* _cg; public: - PredictedIntrinsicGenerator(CallGenerator* intrinsic, - CallGenerator* cg) + PredicatedIntrinsicGenerator(CallGenerator* intrinsic, + CallGenerator* cg) : CallGenerator(cg->method()) { _intrinsic = intrinsic; @@ -883,107 +900,186 @@ virtual bool is_inlined() const { return true; } virtual bool is_intrinsic() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; -CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic, - CallGenerator* cg) { - return new PredictedIntrinsicGenerator(intrinsic, cg); +CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic, + CallGenerator* cg) { + return new PredicatedIntrinsicGenerator(intrinsic, cg); } -JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { + // The code we want to generate here is: + // if (receiver == NULL) + // uncommon_trap + // if (predicate(0)) + // do_intrinsic(0) + // else + // if (predicate(1)) + // do_intrinsic(1) + // ... + // else + // do_java_comp + GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); CompileLog* log = kit.C->log(); if (log != NULL) { - log->elem("predicted_intrinsic bci='%d' method='%d'", + log->elem("predicated_intrinsic bci='%d' method='%d'", jvms->bci(), log->identify(method())); } - Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms()); - if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - - SafePointNode* slow_map = NULL; - JVMState* slow_jvms; - if (slow_ctl != NULL) { - PreserveJVMState pjvms(&kit); - kit.set_control(slow_ctl); - if (!kit.stopped()) { - slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser); - if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - assert(slow_jvms != NULL, "must be"); - kit.add_exception_states_from(slow_jvms); - kit.set_map(slow_jvms->map()); - if (!kit.stopped()) - slow_map = kit.stop(); + if (!method()->is_static()) { + // We need an explicit receiver null_check before checking its type in predicate. + // We share a map with the caller, so its JVMS gets adjusted. + Node* receiver = kit.null_check_receiver_before_call(method()); + if (kit.stopped()) { + return kit.transfer_exceptions_into_jvms(); } } - if (kit.stopped()) { - // Predicate is always false. - kit.set_jvms(slow_jvms); + int n_predicates = _intrinsic->predicates_count(); + assert(n_predicates > 0, "sanity"); + + JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1)); + + // Region for normal compilation code if intrinsic failed. + Node* slow_region = new (kit.C) RegionNode(1); + + int results = 0; + for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) { +#ifdef ASSERT + JVMState* old_jvms = kit.jvms(); + SafePointNode* old_map = kit.map(); + Node* old_io = old_map->i_o(); + Node* old_mem = old_map->memory(); + Node* old_exc = old_map->next_exception(); +#endif + Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate); +#ifdef ASSERT + // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate. 
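The comment block above describes the generated shape. The same dispatch, reduced to plain control flow, looks roughly like this (a standalone sketch; predicate, do_intrinsic, and do_java are stand-ins for the generated graph paths, not HotSpot functions):

  #include <cstdio>

  static bool predicate(int i, int input)    { return input % (i + 2) == 0; }
  static int  do_intrinsic(int i, int input) { return input / (i + 2); }
  static int  do_java(int input)             { return input; }  // the _cg fallback

  static int dispatch(int n_predicates, int input) {
    // Try each predicate in order; the first that holds selects its intrinsic.
    for (int i = 0; i < n_predicates; i++) {
      if (predicate(i, input)) return do_intrinsic(i, input);
    }
    return do_java(input);  // final 'else' after all predicates
  }

  int main() {
    std::printf("%d %d %d\n", dispatch(2, 8), dispatch(2, 9), dispatch(2, 7));
    return 0;  // prints: 4 3 7
  }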
+ assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state"); + SafePointNode* new_map = kit.map(); + assert(old_io == new_map->i_o(), "generate_predicate should not change i_o"); + assert(old_mem == new_map->memory(), "generate_predicate should not change memory"); + assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions"); +#endif + if (!kit.stopped()) { + PreserveJVMState pjvms(&kit); + // Generate intrinsic code: + JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); + if (new_jvms == NULL) { + // Intrinsic failed, use normal compilation path for this predicate. + slow_region->add_req(kit.control()); + } else { + kit.add_exception_states_from(new_jvms); + kit.set_jvms(new_jvms); + if (!kit.stopped()) { + result_jvms[results++] = kit.jvms(); + } + } + } + if (else_ctrl == NULL) { + else_ctrl = kit.C->top(); + } + kit.set_control(else_ctrl); + } + if (!kit.stopped()) { + // Final 'else' after predicates. + slow_region->add_req(kit.control()); + } + if (slow_region->req() > 1) { + PreserveJVMState pjvms(&kit); + // Generate normal compilation code: + kit.set_control(gvn.transform(slow_region)); + JVMState* new_jvms = _cg->generate(kit.sync_jvms()); + if (kit.failing()) + return NULL; // might happen because of NodeCountInliningCutoff + assert(new_jvms != NULL, "must be"); + kit.add_exception_states_from(new_jvms); + kit.set_jvms(new_jvms); + if (!kit.stopped()) { + result_jvms[results++] = kit.jvms(); + } + } + + if (results == 0) { + // All paths ended in uncommon traps. + (void) kit.stop(); return kit.transfer_exceptions_into_jvms(); } - // Generate intrinsic code: - JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser); - if (new_jvms == NULL) { - // Intrinsic failed, so use slow code or make a direct call. - if (slow_map == NULL) { - CallGenerator* cg = CallGenerator::for_direct_call(method()); - new_jvms = cg->generate(kit.sync_jvms(), parent_parser); - } else { - kit.set_jvms(slow_jvms); - return kit.transfer_exceptions_into_jvms(); - } - } - kit.add_exception_states_from(new_jvms); - kit.set_jvms(new_jvms); - - // Need to merge slow and fast? - if (slow_map == NULL) { - // The fast path is the only path remaining. + if (results == 1) { // Only one path + kit.set_jvms(result_jvms[0]); return kit.transfer_exceptions_into_jvms(); } - if (kit.stopped()) { - // Intrinsic method threw an exception, so it's just the slow path after all. - kit.set_jvms(slow_jvms); - return kit.transfer_exceptions_into_jvms(); + // Merge all paths. + kit.C->set_has_split_ifs(true); // Has chance for split-if optimization + RegionNode* region = new (kit.C) RegionNode(results + 1); + Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); + for (int i = 0; i < results; i++) { + JVMState* jvms = result_jvms[i]; + int path = i + 1; + SafePointNode* map = jvms->map(); + region->init_req(path, map->control()); + iophi->set_req(path, map->i_o()); + if (i == 0) { + kit.set_jvms(jvms); + } else { + kit.merge_memory(map->merged_memory(), region, path); + } + } + kit.set_control(gvn.transform(region)); + kit.set_i_o(gvn.transform(iophi)); + // Transform new memory Phis. + for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) { + Node* phi = mms.memory(); + if (phi->is_Phi() && phi->in(0) == region) { + mms.set_memory(gvn.transform(phi)); + } } - // Finish the diamond. 
- kit.C->set_has_split_ifs(true); // Has chance for split-if optimization - RegionNode* region = new (kit.C) RegionNode(3); - region->init_req(1, kit.control()); - region->init_req(2, slow_map->control()); - kit.set_control(gvn.transform(region)); - Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); - iophi->set_req(2, slow_map->i_o()); - kit.set_i_o(gvn.transform(iophi)); - kit.merge_memory(slow_map->merged_memory(), region, 2); + // Merge debug info. + Node** ins = NEW_RESOURCE_ARRAY(Node*, results); uint tos = kit.jvms()->stkoff() + kit.sp(); - uint limit = slow_map->req(); + Node* map = kit.map(); + uint limit = map->req(); for (uint i = TypeFunc::Parms; i < limit; i++) { // Skip unused stack slots; fast forward to monoff(); if (i == tos) { i = kit.jvms()->monoff(); if( i >= limit ) break; } - Node* m = kit.map()->in(i); - Node* n = slow_map->in(i); - if (m != n) { - const Type* t = gvn.type(m)->meet_speculative(gvn.type(n)); - Node* phi = PhiNode::make(region, m, t); - phi->set_req(2, n); - kit.map()->set_req(i, gvn.transform(phi)); + Node* n = map->in(i); + ins[0] = n; + const Type* t = gvn.type(n); + bool needs_phi = false; + for (int j = 1; j < results; j++) { + JVMState* jvms = result_jvms[j]; + Node* jmap = jvms->map(); + Node* m = NULL; + if (jmap->req() > i) { + m = jmap->in(i); + if (m != n) { + needs_phi = true; + t = t->meet_speculative(gvn.type(m)); + } + } + ins[j] = m; + } + if (needs_phi) { + Node* phi = PhiNode::make(region, n, t); + for (int j = 1; j < results; j++) { + phi->set_req(j + 1, ins[j]); + } + map->set_req(i, gvn.transform(phi)); } } + return kit.transfer_exceptions_into_jvms(); } @@ -1006,7 +1102,7 @@ virtual bool is_virtual() const { ShouldNotReachHere(); return false; } virtual bool is_trap() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -1018,7 +1114,7 @@ } -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). int nargs = method()->arg_size(); --- ./hotspot/src/share/vm/opto/callGenerator.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/callGenerator.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -31,8 +31,6 @@ #include "opto/type.hpp" #include "runtime/deoptimization.hpp" -class Parse; - //---------------------------CallGenerator------------------------------------- // The subclasses of this class handle generation of ideal nodes for // call sites and method entry points. @@ -63,8 +61,9 @@ virtual bool is_virtual() const { return false; } // is_deferred: The decision whether to inline or not is deferred. virtual bool is_deferred() const { return false; } - // is_predicted: Uses an explicit check against a predicted type. - virtual bool is_predicted() const { return false; } + // is_predicated: Uses an explicit check (predicate). + virtual bool is_predicated() const { return false; } + virtual int predicates_count() const { return 0; } // is_trap: Does not return to the caller. (E.g., uncommon trap.) virtual bool is_trap() const { return false; } // does_virtual_dispatch: Should try inlining as normal method first. @@ -111,7 +110,7 @@ // // If the result is NULL, it means that this CallGenerator was unable // to handle the given call, and another CallGenerator should be consulted. 
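The debug-info merge above creates a Phi for a JVM-state slot only when the surviving paths disagree on its value. A minimal sketch of that per-slot test (illustrative; plain ints stand in for nodes):

  #include <cstdio>
  #include <vector>

  int main() {
    // One row per surviving path, one column per JVM-state slot.
    std::vector<std::vector<int> > paths;
    paths.push_back(std::vector<int>{5, 7, 9});
    paths.push_back(std::vector<int>{5, 8, 9});

    for (size_t slot = 0; slot < paths[0].size(); slot++) {
      bool needs_phi = false;
      for (size_t p = 1; p < paths.size(); p++)
        if (paths[p][slot] != paths[0][slot]) needs_phi = true;  // values differ
      std::printf("slot %zu: %s\n", slot, needs_phi ? "phi" : "same value");
    }
    return 0;  // slot 1 needs a phi, slots 0 and 2 do not
  }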
- virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0; + virtual JVMState* generate(JVMState* jvms) = 0; // How to generate a call site that is inlined: static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1); @@ -157,9 +156,9 @@ // Registry for intrinsics: static CallGenerator* for_intrinsic(ciMethod* m); static void register_intrinsic(ciMethod* m, CallGenerator* cg); - static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic, - CallGenerator* cg); - virtual Node* generate_predicate(JVMState* jvms) { return NULL; }; + static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic, + CallGenerator* cg); + virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; }; virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); } --- ./hotspot/src/share/vm/opto/callnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/callnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -777,7 +777,7 @@ } // Returns the unique CheckCastPP of a call -// or 'this' if there are several CheckCastPP +// or 'this' if there are several CheckCastPP or unexpected uses // or returns NULL if there is no one. Node *CallNode::result_cast() { Node *cast = NULL; @@ -793,6 +793,13 @@ return this; // more than 1 CheckCastPP } cast = use; + } else if (!use->is_Initialize() && + !use->is_AddP()) { + // Expected uses are restricted to a CheckCastPP, an Initialize + // node, and AddP nodes. If we encounter any other use (a Phi + // node can be seen in rare cases) return this to prevent + // incorrect optimizations. + return this; } } return cast; @@ -1089,6 +1096,7 @@ #ifndef PRODUCT void SafePointNode::dump_spec(outputStream *st) const { st->print(" SafePoint "); + _replaced_nodes.dump(st); } #endif --- ./hotspot/src/share/vm/opto/callnode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/callnode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,6 +30,7 @@ #include "opto/multnode.hpp" #include "opto/opcodes.hpp" #include "opto/phaseX.hpp" +#include "opto/replacednodes.hpp" #include "opto/type.hpp" // Portions of code courtesy of Clifford Click @@ -335,6 +336,7 @@ OopMap* _oop_map; // Array of OopMap info (8-bit char) for GC JVMState* const _jvms; // Pointer to list of JVM State objects const TypePtr* _adr_type; // What type of memory does this node produce? + ReplacedNodes _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map() // Many calls take *all* of memory as input, // but some produce a limited subset of that memory as output. 
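The result_cast() hardening shown above whitelists the use kinds the optimizer understands and gives up on anything else. A minimal sketch of that defensive scan (illustrative enum and return codes, not the HotSpot node API):

  #include <cstdio>
  #include <vector>

  enum UseKind { CheckCastPP, Initialize, AddP, Phi };

  // Index of the unique CheckCastPP, -1 if there is none, -2 to give up.
  static int result_cast(const std::vector<UseKind>& uses) {
    int cast = -1;
    for (size_t i = 0; i < uses.size(); i++) {
      if (uses[i] == CheckCastPP) {
        if (cast != -1) return -2;           // more than one cast
        cast = (int)i;
      } else if (uses[i] != Initialize && uses[i] != AddP) {
        return -2;                           // unexpected use: prevent bad optimizations
      }
    }
    return cast;
  }

  int main() {
    std::vector<UseKind> ok;  ok.push_back(CheckCastPP); ok.push_back(AddP);
    std::vector<UseKind> bad; bad.push_back(CheckCastPP); bad.push_back(Phi);
    std::printf("%d %d\n", result_cast(ok), result_cast(bad));  // prints: 0 -2
    return 0;
  }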
@@ -426,6 +428,37 @@ void set_next_exception(SafePointNode* n); bool has_exceptions() const { return next_exception() != NULL; } + // Helper methods to operate on replaced nodes + ReplacedNodes replaced_nodes() const { + return _replaced_nodes; + } + + void set_replaced_nodes(ReplacedNodes replaced_nodes) { + _replaced_nodes = replaced_nodes; + } + + void clone_replaced_nodes() { + _replaced_nodes.clone(); + } + void record_replaced_node(Node* initial, Node* improved) { + _replaced_nodes.record(initial, improved); + } + void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) { + _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx); + } + void delete_replaced_nodes() { + _replaced_nodes.reset(); + } + void apply_replaced_nodes() { + _replaced_nodes.apply(this); + } + void merge_replaced_nodes_with(SafePointNode* sfpt) { + _replaced_nodes.merge_with(sfpt->_replaced_nodes); + } + bool has_replaced_nodes() const { + return !_replaced_nodes.is_empty(); + } + // Standard Node stuff virtual int Opcode() const; virtual bool pinned() const { return true; } --- ./hotspot/src/share/vm/opto/coalesce.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/coalesce.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -281,9 +281,11 @@ Block *pred = _phc._cfg.get_block_for_node(b->pred(j)); Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); - // Rematerialize constants instead of copying them - if( m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize() ) { + // Rematerialize constants instead of copying them. + // We do this only for immediate constants, we avoid constant table loads + // because that will unsafely extend the live range of the constant table base. + if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the predecessor basic block pred->add_inst(copy); @@ -317,8 +319,8 @@ assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); // At this point it is unsafe to extend live ranges (6550579). // Rematerialize only constants as we do for Phi above. - if(m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize()) { + if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us b->insert_node(copy, l++); --- ./hotspot/src/share/vm/opto/compile.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/compile.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -391,6 +391,11 @@ uint next = 0; while (next < useful.size()) { Node *n = useful.at(next++); + if (n->is_SafePoint()) { + // We're done with a parsing phase. Replaced nodes are not valid + // beyond that point. 
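The SafePointNode helpers above expose a small record/apply/reset lifecycle for replaced nodes, and the compile.cpp hunk drops them once parsing is over. A minimal sketch of that bookkeeping (illustrative; node identities reduced to ints):

  #include <cstdio>
  #include <map>

  struct ReplacedNodes {
    std::map<int, int> pairs;                      // initial id -> improved id
    void record(int initial, int improved) { pairs[initial] = improved; }
    int  apply(int id) const {                     // follow a recorded improvement
      std::map<int, int>::const_iterator it = pairs.find(id);
      return it == pairs.end() ? id : it->second;
    }
    void reset() { pairs.clear(); }                // "not valid beyond this point"
    bool is_empty() const { return pairs.empty(); }
  };

  int main() {
    ReplacedNodes rn;
    rn.record(3, 30);
    std::printf("%d %d\n", rn.apply(3), rn.apply(4));  // prints: 30 4
    rn.reset();
    std::printf("%d\n", (int)rn.is_empty());           // prints: 1
    return 0;
  }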
+ n->as_SafePoint()->delete_replaced_nodes(); + } // Use raw traversal of out edges since this code removes out edges int max = n->outcnt(); for (int j = 0; j < max; ++j) { @@ -660,6 +665,10 @@ _printer(IdealGraphPrinter::printer()), #endif _congraph(NULL), + _comp_arena(mtCompiler), + _node_arena(mtCompiler), + _old_arena(mtCompiler), + _Compile_types(mtCompiler), _replay_inline_data(NULL), _late_inlines(comp_arena(), 2, 0, NULL), _string_late_inlines(comp_arena(), 2, 0, NULL), @@ -670,8 +679,8 @@ _inlining_incrementally(false), _print_inlining_list(NULL), _print_inlining_idx(0), - _preserve_jvm_state(0), - _interpreter_frame_size(0) { + _interpreter_frame_size(0), + _max_node_limit(MaxNodeLimit) { C = this; CompileWrapper cw(this); @@ -782,7 +791,7 @@ return; } JVMState* jvms = build_start_state(start(), tf()); - if ((jvms = cg->generate(jvms, NULL)) == NULL) { + if ((jvms = cg->generate(jvms)) == NULL) { record_method_not_compilable("method parse failed"); return; } @@ -968,6 +977,10 @@ _in_dump_cnt(0), _printer(NULL), #endif + _comp_arena(mtCompiler), + _node_arena(mtCompiler), + _old_arena(mtCompiler), + _Compile_types(mtCompiler), _dead_node_list(comp_arena()), _dead_node_count(0), _congraph(NULL), @@ -977,9 +990,9 @@ _inlining_incrementally(false), _print_inlining_list(NULL), _print_inlining_idx(0), - _preserve_jvm_state(0), _allowed_reasons(0), - _interpreter_frame_size(0) { + _interpreter_frame_size(0), + _max_node_limit(MaxNodeLimit) { C = this; #ifndef PRODUCT @@ -1089,6 +1102,7 @@ set_do_count_invocations(false); set_do_method_data_update(false); set_rtm_state(NoRTM); // No RTM lock eliding by default + method_has_option_value("MaxNodeLimit", _max_node_limit); #if INCLUDE_RTM_OPT if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) { int rtm_state = method()->method_data()->rtm_state(); @@ -1910,6 +1924,8 @@ for_igvn()->clear(); gvn->replace_with(&igvn); + _late_inlines_pos = _late_inlines.length(); + while (_boxing_late_inlines.length() > 0) { CallGenerator* cg = _boxing_late_inlines.pop(); cg->do_late_inline(); @@ -1973,8 +1989,8 @@ if (live_nodes() > (uint)LiveNodeCountInliningCutoff) { if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) { // PhaseIdealLoop is expensive so we only try it once we are - // out of loop and we only try it again if the previous helped - // got the number of nodes down significantly + // out of live nodes and we only try it again if the previous + // pass helped get the number of nodes down significantly PhaseIdealLoop ideal_loop( igvn, false, true ); if (failing()) return; low_live_nodes = live_nodes(); @@ -2066,6 +2082,10 @@ // Inline valueOf() methods now. inline_boxing_calls(igvn); + if (AlwaysIncrementalInline) { + inline_incrementally(igvn); + } + print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2); if (failing()) return; --- ./hotspot/src/share/vm/opto/compile.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/compile.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -290,6 +290,7 @@ int _freq_inline_size; // Max hot method inline size for this compilation int _fixed_slots; // count of frame slots not allocated by the register // allocator i.e. locks, original deopt pc, etc. + uintx _max_node_limit; // Max unique node count during a single compilation. // For deopt int _orig_pc_slot; int _orig_pc_slot_offset_in_bytes; @@ -429,9 +430,6 @@ // Remove the speculative part of types and clean up the graph void remove_speculative_types(PhaseIterGVN &igvn); - // Are we within a PreserveJVMState block? 
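The MaxNodeLimit plumbing above lets a CompilerOracle option override the global node budget for a single method. A minimal sketch of that default-plus-override lookup (illustrative; kMaxNodeLimit and MethodOptions are stand-ins, not HotSpot names):

  #include <cstdio>
  #include <map>
  #include <string>

  static const unsigned kMaxNodeLimit = 80000;  // stand-in for the global flag

  struct MethodOptions {
    std::map<std::string, unsigned> values;
    template <typename T>
    bool has_option_value(const std::string& name, T& out) const {
      std::map<std::string, unsigned>::const_iterator it = values.find(name);
      if (it == values.end()) return false;  // no per-method override
      out = (T)it->second;
      return true;
    }
  };

  int main() {
    unsigned max_node_limit = kMaxNodeLimit;   // start from the default
    MethodOptions opts;
    opts.values["MaxNodeLimit"] = 240000;      // e.g. bumped for JSR292-heavy code
    opts.has_option_value("MaxNodeLimit", max_node_limit);
    std::printf("limit = %u\n", max_node_limit);  // prints: limit = 240000
    return 0;
  }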
- int _preserve_jvm_state; - void* _replay_inline_data; // Pointer to data loaded from file public: @@ -597,10 +595,17 @@ void set_rtm_state(RTMState s) { _rtm_state = s; } bool use_rtm() const { return (_rtm_state & NoRTM) == 0; } bool profile_rtm() const { return _rtm_state == ProfileRTM; } + uint max_node_limit() const { return (uint)_max_node_limit; } + void set_max_node_limit(uint n) { _max_node_limit = n; } + // check the CompilerOracle for special behaviours for this compile bool method_has_option(const char * option) { return method() != NULL && method()->has_option(option); } + template<typename T> + bool method_has_option_value(const char * option, T& value) { + return method() != NULL && method()->has_option_value(option, value); + } #ifndef PRODUCT bool trace_opto_output() const { return _trace_opto_output; } bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; } @@ -722,7 +727,7 @@ record_method_not_compilable(reason, true); } bool check_node_count(uint margin, const char* reason) { - if (live_nodes() + margin > (uint)MaxNodeLimit) { + if (live_nodes() + margin > max_node_limit()) { record_method_not_compilable(reason); return true; } else { @@ -1196,21 +1201,6 @@ // Auxiliary method for randomized fuzzing/stressing static bool randomized_select(int count); - - // enter a PreserveJVMState block - void inc_preserve_jvm_state() { - _preserve_jvm_state++; - } - - // exit a PreserveJVMState block - void dec_preserve_jvm_state() { - _preserve_jvm_state--; - assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative"); - } - - bool has_preserve_jvm_state() const { - return _preserve_jvm_state > 0; - } }; #endif // SHARE_VM_OPTO_COMPILE_HPP --- ./hotspot/src/share/vm/opto/connode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/connode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -441,6 +441,102 @@ return this; } +uint CastIINode::size_of() const { + return sizeof(*this); +} + +uint CastIINode::cmp(const Node &n) const { + return TypeNode::cmp(n) && ((CastIINode&)n)._carry_dependency == _carry_dependency; +} + +Node *CastIINode::Identity(PhaseTransform *phase) { + if (_carry_dependency) { + return this; + } + return ConstraintCastNode::Identity(phase); +} + +const Type *CastIINode::Value(PhaseTransform *phase) const { + const Type *res = ConstraintCastNode::Value(phase); + + // Try to improve the type of the CastII if we recognize a CmpI/If + // pattern. 
+ if (_carry_dependency) { + if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) { + assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj"); + Node* proj = in(0); + if (proj->in(0)->in(1)->is_Bool()) { + Node* b = proj->in(0)->in(1); + if (b->in(1)->Opcode() == Op_CmpI) { + Node* cmp = b->in(1); + if (cmp->in(1) == in(1) && phase->type(cmp->in(2))->isa_int()) { + const TypeInt* in2_t = phase->type(cmp->in(2))->is_int(); + const Type* t = TypeInt::INT; + BoolTest test = b->as_Bool()->_test; + if (proj->is_IfFalse()) { + test = test.negate(); + } + BoolTest::mask m = test._test; + jlong lo_long = min_jint; + jlong hi_long = max_jint; + if (m == BoolTest::le || m == BoolTest::lt) { + hi_long = in2_t->_hi; + if (m == BoolTest::lt) { + hi_long -= 1; + } + } else if (m == BoolTest::ge || m == BoolTest::gt) { + lo_long = in2_t->_lo; + if (m == BoolTest::gt) { + lo_long += 1; + } + } else if (m == BoolTest::eq) { + lo_long = in2_t->_lo; + hi_long = in2_t->_hi; + } else if (m == BoolTest::ne) { + // can't do any better + } else { + stringStream ss; + test.dump_on(&ss); + fatal(err_msg_res("unexpected comparison %s", ss.as_string())); + } + int lo_int = (int)lo_long; + int hi_int = (int)hi_long; + + if (lo_long != (jlong)lo_int) { + lo_int = min_jint; + } + if (hi_long != (jlong)hi_int) { + hi_int = max_jint; + } + + t = TypeInt::make(lo_int, hi_int, Type::WidenMax); + + res = res->filter_speculative(t); + + return res; + } + } + } + } + } + return res; +} + +Node *CastIINode::Ideal_DU_postCCP(PhaseCCP *ccp) { + if (_carry_dependency) { + return NULL; + } + return ConstraintCastNode::Ideal_DU_postCCP(ccp); +} + +#ifndef PRODUCT +void CastIINode::dump_spec(outputStream *st) const { + TypeNode::dump_spec(st); + if (_carry_dependency) { + st->print(" carry dependency"); + } +} +#endif //============================================================================= --- ./hotspot/src/share/vm/opto/connode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/connode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -241,10 +241,25 @@ //------------------------------CastIINode------------------------------------- // cast integer to integer (different range) class CastIINode: public ConstraintCastNode { + private: + // Can this node be removed post CCP or does it carry a required dependency? + const bool _carry_dependency; + + protected: + virtual uint cmp( const Node &n ) const; + virtual uint size_of() const; + public: - CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {} + CastIINode(Node *n, const Type *t, bool carry_dependency = false) + : ConstraintCastNode(n,t), _carry_dependency(carry_dependency) {} virtual int Opcode() const; virtual uint ideal_reg() const { return Op_RegI; } + virtual Node *Identity( PhaseTransform *phase ); + virtual const Type *Value( PhaseTransform *phase ) const; + virtual Node *Ideal_DU_postCCP( PhaseCCP * ); +#ifndef PRODUCT + virtual void dump_spec(outputStream *st) const; +#endif }; //------------------------------CastPPNode------------------------------------- --- ./hotspot/src/share/vm/opto/doCall.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/doCall.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -115,12 +115,12 @@ if (allow_inline && allow_intrinsics) { CallGenerator* cg = find_intrinsic(callee, call_does_dispatch); if (cg != NULL) { - if (cg->is_predicted()) { + if (cg->is_predicated()) { // Code without intrinsic but, hopefully, inlined. 
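The CastIINode::Value() logic above turns a dominating CmpI/If into a narrowed [lo, hi] integer type, collapsing an endpoint back to min/max int whenever the +1/-1 adjustment would overflow. A minimal sketch of that interval computation (illustrative; Mask mirrors BoolTest::mask only in spirit):

  #include <climits>
  #include <cstdio>

  enum Mask { LT, LE, GT, GE, EQ, NE };

  // Narrow [lo, hi] for i, given that "i <m> k" held on the taken branch.
  static void narrow(Mask m, int k, int& lo_int, int& hi_int) {
    long long lo = INT_MIN, hi = INT_MAX;
    switch (m) {
      case LE: hi = k;                break;
      case LT: hi = (long long)k - 1; break;  // k-1 can leave the int range
      case GE: lo = k;                break;
      case GT: lo = (long long)k + 1; break;  // k+1 can leave the int range
      case EQ: lo = hi = k;           break;
      case NE: break;                         // cannot do any better
    }
    // Overflow guard, mirroring the lo_long/lo_int comparison in the hunk:
    lo_int = (lo < INT_MIN || lo > INT_MAX) ? INT_MIN : (int)lo;
    hi_int = (hi < INT_MIN || hi > INT_MAX) ? INT_MAX : (int)hi;
  }

  int main() {
    int lo, hi;
    narrow(LT, 10, lo, hi);      std::printf("[%d, %d]\n", lo, hi);  // [INT_MIN, 9]
    narrow(GT, INT_MAX, lo, hi); std::printf("[%d, %d]\n", lo, hi);  // degenerates to full range
    return 0;
  }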
CallGenerator* inline_cg = this->call_generator(callee, vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false); if (inline_cg != NULL) { - cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); + cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg); } } @@ -410,6 +410,11 @@ ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); assert(declared_signature != NULL, "cannot be null"); + // Bump max node limit for JSR292 users + if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) { + C->set_max_node_limit(3*MaxNodeLimit); + } + // uncommon-trap when callee is unloaded, uninitialized or will not link // bailout when too many arguments for register representation if (!will_link || can_not_compile_call_site(orig_callee, klass)) { @@ -523,7 +528,7 @@ // because exceptions don't return to the call site.) profile_call(receiver); - JVMState* new_jvms = cg->generate(jvms, this); + JVMState* new_jvms = cg->generate(jvms); if (new_jvms == NULL) { // When inlining attempt fails (e.g., too many arguments), // it may contaminate the current compile state, making it @@ -537,7 +542,7 @@ // intrinsic was expecting to optimize. Should always be possible to // get a normal java call that may inline in that case cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false); - if ((new_jvms = cg->generate(jvms, this)) == NULL) { + if ((new_jvms = cg->generate(jvms)) == NULL) { guarantee(failing(), "call failed to generate: calls should work"); return; } @@ -791,7 +796,7 @@ Node* ex_klass_node = NULL; if (has_ex_handler() && !ex_type->klass_is_exact()) { Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes()); - ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); + ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); // Compute the exception klass a little more cleverly. // Obvious solution is to simple do a LoadKlass from the 'ex_node'. @@ -799,11 +804,17 @@ // each arm of the Phi. If I know something clever about the exceptions // I'm loading the class from, I can replace the LoadKlass with the // klass constant for the exception oop. - if( ex_node->is_Phi() ) { - ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); - for( uint i = 1; i < ex_node->req(); i++ ) { - Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() ); - Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); + if (ex_node->is_Phi()) { + ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT); + for (uint i = 1; i < ex_node->req(); i++) { + Node* ex_in = ex_node->in(i); + if (ex_in == top() || ex_in == NULL) { + // This path was not taken. 
+ ex_klass_node->init_req(i, top()); + continue; + } + Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes()); + Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); ex_klass_node->init_req( i, k ); } _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT); --- ./hotspot/src/share/vm/opto/escape.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/escape.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,8 @@ ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), + _in_worklist(C->comp_arena()), + _next_pidx(0), _collecting(true), _verify(false), _compile(C), @@ -124,13 +126,19 @@ if (C->root() != NULL) { ideal_nodes.push(C->root()); } + // Processed ideal nodes are unique on ideal_nodes list + // but several ideal nodes are mapped to the phantom_obj. + // To avoid duplicated entries on the following worklists + // add the phantom_obj only once to them. + ptnodes_worklist.append(phantom_obj); + java_objects_worklist.append(phantom_obj); for( uint next = 0; next < ideal_nodes.size(); ++next ) { Node* n = ideal_nodes.at(next); // Create PointsTo nodes and add them to Connection Graph. Called // only once per ideal node since ideal_nodes is Unique_Node list. add_node_to_connection_graph(n, &delayed_worklist); PointsToNode* ptn = ptnode_adr(n->_idx); - if (ptn != NULL) { + if (ptn != NULL && ptn != phantom_obj) { ptnodes_worklist.append(ptn); if (ptn->is_JavaObject()) { java_objects_worklist.append(ptn->as_JavaObject()); @@ -414,7 +422,7 @@ } case Op_CreateEx: { // assume that all exception objects globally escape - add_java_object(n, PointsToNode::GlobalEscape); + map_ideal_node(n, phantom_obj); break; } case Op_LoadKlass: @@ -938,7 +946,14 @@ strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || - strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0) + strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || + strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0) ))) { call->dump(); fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name)); @@ -1058,13 +1073,8 @@ // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. // Set limit to 20 to catch situation when something did go wrong and // bailout Escape Analysis. - // Also limit build time to 30 sec (60 in debug VM). + // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag. 
#define CG_BUILD_ITER_LIMIT 20 -#ifdef ASSERT -#define CG_BUILD_TIME_LIMIT 60.0 -#else -#define CG_BUILD_TIME_LIMIT 30.0 -#endif // Propagate GlobalEscape and ArgEscape escape states and check that // we still have non-escaping objects. The method pushes on _worklist @@ -1075,12 +1085,13 @@ // Now propagate references to all JavaObject nodes. int java_objects_length = java_objects_worklist.length(); elapsedTimer time; + bool timeout = false; int new_edges = 1; int iterations = 0; do { while ((new_edges > 0) && - (iterations++ < CG_BUILD_ITER_LIMIT) && - (time.seconds() < CG_BUILD_TIME_LIMIT)) { + (iterations++ < CG_BUILD_ITER_LIMIT)) { + double start_time = time.seconds(); time.start(); new_edges = 0; // Propagate references to phantom_object for nodes pushed on _worklist @@ -1089,7 +1100,29 @@ for (int next = 0; next < java_objects_length; ++next) { JavaObjectNode* ptn = java_objects_worklist.at(next); new_edges += add_java_object_edges(ptn, true); + +#define SAMPLE_SIZE 4 + if ((next % SAMPLE_SIZE) == 0) { + // Every 4 iterations, estimate how much time it will take + // to complete graph construction. + time.stop(); + // Poll for requests from shutdown mechanism to quiesce compiler + // because Connection graph construction may take a long time. + CompileBroker::maybe_block(); + double stop_time = time.seconds(); + double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE; + double time_until_end = time_per_iter * (double)(java_objects_length - next); + if ((start_time + time_until_end) >= EscapeAnalysisTimeout) { + timeout = true; + break; // Timeout + } + start_time = stop_time; + time.start(); + } +#undef SAMPLE_SIZE + } + if (timeout) break; if (new_edges > 0) { // Update escape states on each iteration if graph was updated. if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { @@ -1097,9 +1130,12 @@ } } time.stop(); + if (time.seconds() >= EscapeAnalysisTimeout) { + timeout = true; + break; + } } - if ((iterations < CG_BUILD_ITER_LIMIT) && - (time.seconds() < CG_BUILD_TIME_LIMIT)) { + if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { time.start(); // Find fields which have unknown value. int fields_length = oop_fields_worklist.length(); @@ -1112,18 +1148,21 @@ } } time.stop(); + if (time.seconds() >= EscapeAnalysisTimeout) { + timeout = true; + break; + } } else { new_edges = 0; // Bailout } } while (new_edges > 0); // Bailout if passed limits. - if ((iterations >= CG_BUILD_ITER_LIMIT) || - (time.seconds() >= CG_BUILD_TIME_LIMIT)) { + if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { Compile* C = _compile; if (C->log() != NULL) { C->log()->begin_elem("connectionGraph_bailout reason='reached "); - C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time"); + C->log()->text("%s", timeout ? "time" : "iterations"); C->log()->end_elem(" limit'"); } assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", @@ -1140,7 +1179,6 @@ #endif #undef CG_BUILD_ITER_LIMIT -#undef CG_BUILD_TIME_LIMIT // Find fields initialized by NULL for non-escaping Allocations. int non_escaped_length = non_escaped_worklist.length(); @@ -1264,8 +1302,8 @@ } } } - while(_worklist.length() > 0) { - PointsToNode* use = _worklist.pop(); + for (int l = 0; l < _worklist.length(); l++) { + PointsToNode* use = _worklist.at(l); if (PointsToNode::is_base_use(use)) { // Add reference from jobj to field and from field to jobj (field's base). 
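The timeout handling above replaces a fixed wall-clock cutoff with sampling: every SAMPLE_SIZE objects it extrapolates the remaining work from the measured per-item cost and bails out if the projection exceeds the budget. A minimal sketch of the estimation loop (illustrative constants; std::clock stands in for elapsedTimer):

  #include <cstdio>
  #include <ctime>

  int main() {
    const int kSampleSize = 4;            // cf. SAMPLE_SIZE
    const double kBudgetSeconds = 20.0;   // cf. the EscapeAnalysisTimeout budget
    const int kItems = 100000;

    std::clock_t t0 = std::clock();
    double start_time = 0.0;
    bool timeout = false;

    for (int next = 0; next < kItems && !timeout; next++) {
      // ... do one unit of graph-building work here ...
      if ((next % kSampleSize) == 0) {
        double now = (double)(std::clock() - t0) / CLOCKS_PER_SEC;
        double per_item = (now - start_time) / kSampleSize;
        double until_end = per_item * (kItems - next);
        if (now + until_end >= kBudgetSeconds) timeout = true;  // projected overrun
        start_time = now;
      }
    }
    std::printf("timeout: %s\n", timeout ? "yes" : "no");
    return 0;
  }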
use = PointsToNode::get_use_node(use)->as_Field(); @@ -1312,6 +1350,8 @@ add_field_uses_to_worklist(use->as_Field()); } } + _worklist.clear(); + _in_worklist.Reset(); return new_edges; } @@ -1891,7 +1931,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) LocalVarNode(C, n, es); + ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); _nodes.at_put(n->_idx, ptadr); } @@ -1902,7 +1942,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es); + ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); _nodes.at_put(n->_idx, ptadr); } @@ -1918,7 +1958,7 @@ es = PointsToNode::GlobalEscape; } Compile* C = _compile; - FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop); + FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); _nodes.at_put(n->_idx, field); } @@ -1932,7 +1972,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es); + ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); _nodes.at_put(n->_idx, ptadr); // Add edge from arraycopy node to source object. (void)add_edge(ptadr, src); @@ -2372,7 +2412,7 @@ } } } - if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) { + if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { if (C->do_escape_analysis() == true && !C->failing()) { // Retry compilation without escape analysis. // If this is the first failure, the sentinel string will "stick" @@ -2832,6 +2872,13 @@ continue; } } + + const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); + if (t == NULL) + continue; // not a TypeOopPtr + if (!t->klass_is_exact()) + continue; // not a unique type + if (alloc->is_Allocate()) { // Set the scalar_replaceable flag for allocation // so it could be eliminated. @@ -2850,10 +2897,7 @@ // - not determined to be ineligible by escape analysis set_map(alloc, n); set_map(n, alloc); - const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); - if (t == NULL) - continue; // not a TypeOopPtr - const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); + const TypeOopPtr* tinst = t->cast_to_instance_id(ni); igvn->hash_delete(n); igvn->set_type(n, tinst); n->raise_bottom_type(tinst); --- ./hotspot/src/share/vm/opto/escape.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/escape.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -125,6 +125,8 @@ class FieldNode; class ArraycopyNode; +class ConnectionGraph; + // ConnectionGraph nodes class PointsToNode : public ResourceObj { GrowableArray<PointsToNode*> _edges; // List of nodes this node points to @@ -137,6 +139,7 @@ Node* const _node; // Ideal node corresponding to this PointsTo node. 
const int _idx; // Cached ideal node's _idx + const uint _pidx; // Index of this node public: typedef enum { @@ -165,17 +168,9 @@ } NodeFlags; - PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type): - _edges(C->comp_arena(), 2, 0, NULL), - _uses (C->comp_arena(), 2, 0, NULL), - _node(n), - _idx(n->_idx), - _type((u1)type), - _escape((u1)es), - _fields_escape((u1)es), - _flags(ScalarReplaceable) { - assert(n != NULL && es != UnknownEscape, "sanity"); - } + inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type); + + uint pidx() const { return _pidx; } Node* ideal_node() const { return _node; } int idx() const { return _idx; } @@ -243,14 +238,14 @@ class LocalVarNode: public PointsToNode { public: - LocalVarNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, LocalVar) {} + LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, LocalVar) {} }; class JavaObjectNode: public PointsToNode { public: - JavaObjectNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, JavaObject) { + JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, JavaObject) { if (es > NoEscape) set_scalar_replaceable(false); } @@ -262,8 +257,8 @@ const bool _is_oop; // Field points to object bool _has_unknown_base; // Has phantom_object base public: - FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop): - PointsToNode(C, n, es, Field), + FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop): + PointsToNode(CG, n, es, Field), _offset(offs), _is_oop(is_oop), _has_unknown_base(false) {} @@ -284,8 +279,8 @@ class ArraycopyNode: public PointsToNode { public: - ArraycopyNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, Arraycopy) {} + ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, Arraycopy) {} }; // Iterators for PointsTo node's edges: @@ -323,11 +318,14 @@ class ConnectionGraph: public ResourceObj { + friend class PointsToNode; private: GrowableArray<PointsToNode*> _nodes; // Map from ideal nodes to // ConnectionGraph nodes. GrowableArray<PointsToNode*> _worklist; // Nodes to be processed + VectorSet _in_worklist; + uint _next_pidx; bool _collecting; // Indicates whether escape information // is still being collected. If false, @@ -353,6 +351,8 @@ } uint nodes_size() const { return _nodes.length(); } + uint next_pidx() { return _next_pidx++; } + // Add nodes to ConnectionGraph. void add_local_var(Node* n, PointsToNode::EscapeState es); void add_java_object(Node* n, PointsToNode::EscapeState es); @@ -396,15 +396,26 @@ int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist); // Put node on worklist if it is (or was) not there. - void add_to_worklist(PointsToNode* pt) { - _worklist.push(pt); - return; + inline void add_to_worklist(PointsToNode* pt) { + PointsToNode* ptf = pt; + uint pidx_bias = 0; + if (PointsToNode::is_base_use(pt)) { + // Create a separate entry in _in_worklist for a marked base edge + // because _worklist may have an entry for a normal edge pointing + // to the same node. To separate them use _next_pidx as a bias. + ptf = PointsToNode::get_use_node(pt)->as_Field(); + pidx_bias = _next_pidx; + } + if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) { + _worklist.append(pt); + } } // Put on worklist all uses of this node. 
- void add_uses_to_worklist(PointsToNode* pt) { - for (UseIterator i(pt); i.has_next(); i.next()) - _worklist.push(i.get()); + inline void add_uses_to_worklist(PointsToNode* pt) { + for (UseIterator i(pt); i.has_next(); i.next()) { + add_to_worklist(i.get()); + } } // Put on worklist all field's uses and related field nodes. @@ -517,8 +528,8 @@ } // Helper functions bool is_oop_field(Node* n, int offset, bool* unsafe); - static Node* get_addp_base(Node *addp); - static Node* find_second_addp(Node* addp, Node* n); + static Node* get_addp_base(Node *addp); + static Node* find_second_addp(Node* addp, Node* n); // offset of a field reference int address_offset(Node* adr, PhaseTransform *phase); @@ -587,4 +598,17 @@ #endif }; +inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type): + _edges(CG->_compile->comp_arena(), 2, 0, NULL), + _uses (CG->_compile->comp_arena(), 2, 0, NULL), + _node(n), + _idx(n->_idx), + _pidx(CG->next_pidx()), + _type((u1)type), + _escape((u1)es), + _fields_escape((u1)es), + _flags(ScalarReplaceable) { + assert(n != NULL && es != UnknownEscape, "sanity"); +} + #endif // SHARE_VM_OPTO_ESCAPE_HPP --- ./hotspot/src/share/vm/opto/graphKit.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/graphKit.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -428,6 +428,7 @@ } } } + phi_map->merge_replaced_nodes_with(ex_map); } //--------------------------use_exception_state-------------------------------- @@ -641,7 +642,6 @@ _map = kit->map(); // preserve the map _sp = kit->sp(); kit->set_map(clone_map ? kit->clone_map() : NULL); - Compile::current()->inc_preserve_jvm_state(); #ifdef ASSERT _bci = kit->bci(); Parse* parser = kit->is_Parse(); @@ -659,7 +659,6 @@ #endif kit->set_map(_map); kit->set_sp(_sp); - Compile::current()->dec_preserve_jvm_state(); } @@ -1151,7 +1150,7 @@ Node* akls = AllocateNode::Ideal_klass(obj, &_gvn); if (akls != NULL) return akls; Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); - return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) ); + return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); } //-------------------------load_array_length----------------------------------- @@ -1398,60 +1397,17 @@ // on the map. This includes locals, stack, and monitors // of the current (innermost) JVM state. - if (!ReplaceInParentMaps) { + // don't let inconsistent types from profiling escape this + // method + + const Type* told = _gvn.type(old); + const Type* tnew = _gvn.type(neww); + + if (!tnew->higher_equal(told)) { return; } - // PreserveJVMState doesn't do a deep copy so we can't modify - // parents - if (Compile::current()->has_preserve_jvm_state()) { - return; - } - - Parse* parser = is_Parse(); - bool progress = true; - Node* ctrl = map()->in(0); - // Follow the chain of parsers and see whether the update can be - // done in the map of callers. We can do the replace for a caller if - // the current control post dominates the control of a caller. 
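The _in_worklist VectorSet introduced in the ConnectionGraph changes above makes enqueuing idempotent: a side bitset answers "already queued?" in O(1), so the worklist itself never holds duplicates. A minimal sketch of the same idea (illustrative; dense int ids play the role of pidx):

  #include <cstdio>
  #include <vector>

  struct Worklist {
    std::vector<int>  items;
    std::vector<bool> in_worklist;                 // indexed by a dense id (pidx)

    explicit Worklist(size_t max_ids) : in_worklist(max_ids, false) {}

    void add(int id) {
      if (!in_worklist[id]) {                      // cf. !_in_worklist.test_set(...)
        in_worklist[id] = true;
        items.push_back(id);
      }
    }
  };

  int main() {
    Worklist wl(16);
    wl.add(3); wl.add(5); wl.add(3);               // the second 3 is ignored
    std::printf("size = %zu\n", wl.items.size());  // prints: size = 2
    return 0;
  }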
- while (parser != NULL && parser->caller() != NULL && progress) { - progress = false; - Node* parent_map = parser->caller()->map(); - assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch"); - - Node* parent_ctrl = parent_map->in(0); - - while (parent_ctrl->is_Region()) { - Node* n = parent_ctrl->as_Region()->is_copy(); - if (n == NULL) { - break; - } - parent_ctrl = n; - } - - for (;;) { - if (ctrl == parent_ctrl) { - // update the map of the exits which is the one that will be - // used when compilation resume after inlining - parser->exits().map()->replace_edge(old, neww); - progress = true; - break; - } - if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { - ctrl = ctrl->in(0)->in(0); - } else if (ctrl->is_Region()) { - Node* n = ctrl->as_Region()->is_copy(); - if (n == NULL) { - break; - } - ctrl = n; - } else { - break; - } - } - - parser = parser->parent_parser(); - } + map()->record_replaced_node(old, neww); } @@ -1855,12 +1811,16 @@ // Replace the call with the current state of the kit. -void GraphKit::replace_call(CallNode* call, Node* result) { +void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) { JVMState* ejvms = NULL; if (has_exceptions()) { ejvms = transfer_exceptions_into_jvms(); } + ReplacedNodes replaced_nodes = map()->replaced_nodes(); + ReplacedNodes replaced_nodes_exception; + Node* ex_ctl = top(); + SafePointNode* final_state = stop(); // Find all the needed outputs of this call @@ -1877,6 +1837,10 @@ C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl); } if (callprojs.fallthrough_memproj != NULL) { + if (final_mem->is_MergeMem()) { + // Parser's exits MergeMem was not transformed but may be optimized + final_mem = _gvn.transform(final_mem); + } C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem); } if (callprojs.fallthrough_ioproj != NULL) { @@ -1908,10 +1872,13 @@ // Load my combined exception state into the kit, with all phis transformed: SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states(); + replaced_nodes_exception = ex_map->replaced_nodes(); Node* ex_oop = ekit.use_exception_state(ex_map); + if (callprojs.catchall_catchproj != NULL) { C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control()); + ex_ctl = ekit.control(); } if (callprojs.catchall_memproj != NULL) { C->gvn_replace_by(callprojs.catchall_memproj, ekit.reset_memory()); @@ -1944,6 +1911,13 @@ _gvn.transform(wl.pop()); } } + + if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { + replaced_nodes.apply(C, final_ctl); + } + if (!ex_ctl->is_top() && do_replaced_nodes) { + replaced_nodes_exception.apply(C, ex_ctl); + } } @@ -2435,23 +2409,24 @@ Node* new_slice = mms.memory2(); if (old_slice != new_slice) { PhiNode* phi; - if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) { - phi = new_slice->as_Phi(); - #ifdef ASSERT - if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) - old_slice = old_slice->in(new_path); - // Caller is responsible for ensuring that any pre-existing - // phis are already aware of old memory. - int old_path = (new_path > 1) ? 
1 : 2; // choose old_path != new_path - assert(phi->in(old_path) == old_slice, "pre-existing phis OK"); - #endif - mms.set_memory(phi); + if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) { + if (mms.is_empty()) { + // clone base memory Phi's inputs for this memory slice + assert(old_slice == mms.base_memory(), "sanity"); + phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C)); + _gvn.set_type(phi, Type::MEMORY); + for (uint i = 1; i < phi->req(); i++) { + phi->init_req(i, old_slice->in(i)); + } + } else { + phi = old_slice->as_Phi(); // Phi was generated already + } } else { phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C)); _gvn.set_type(phi, Type::MEMORY); - phi->set_req(new_path, new_slice); - mms.set_memory(_gvn.transform(phi)); // assume it is complete } + phi->set_req(new_path, new_slice); + mms.set_memory(phi); } } } @@ -2567,7 +2542,7 @@ // cache which is mutable so can't use immutable memory. Other // types load from the super-class display table which is immutable. Node *kmem = might_be_cache ? memory(p2) : immutable_memory(); - Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) ); + Node* nkls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL)); // Compile speed common case: ARE a subtype and we canNOT fail if( superklass == nkls ) --- ./hotspot/src/share/vm/opto/graphKit.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/graphKit.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -685,7 +685,7 @@ // Replace the call with the current state of the kit. Requires // that the call was generated with separate io_projs so that // exceptional control flow can be handled properly. - void replace_call(CallNode* call, Node* result); + void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false); // helper functions for statistics void increment_counter(address counter_addr); // increment a debug counter --- ./hotspot/src/share/vm/opto/ifg.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/ifg.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -541,17 +541,37 @@ if( !n->is_Proj() || // Could also be a flags-projection of a dead ADD or such. (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) { - block->remove_node(j - 1); - if (lrgs(r)._def == n) { - lrgs(r)._def = 0; + bool remove = true; + if (n->is_MachProj()) { + // Don't remove KILL projections if their "defining" nodes have + // memory effects (have SCMemProj projection node) - + // they are not dead even when their result is not used. + // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes. + // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list) + // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed + // in block in such order that KILL MachProj nodes are processed first. 
+ uint cnt = def->outcnt(); + for (uint i = 0; i < cnt; i++) { + Node* proj = def->raw_out(i); + if (proj->Opcode() == Op_SCMemProj) { + remove = false; + break; + } + } } - n->disconnect_inputs(NULL, C); - _cfg.unmap_node_from_block(n); - n->replace_by(C->top()); - // Since yanking a Node from block, high pressure moves up one - hrp_index[0]--; - hrp_index[1]--; - continue; + if (remove) { + block->remove_node(j - 1); + if (lrgs(r)._def == n) { + lrgs(r)._def = 0; + } + n->disconnect_inputs(NULL, C); + _cfg.unmap_node_from_block(n); + n->replace_by(C->top()); + // Since yanking a Node from block, high pressure moves up one + hrp_index[0]--; + hrp_index[1]--; + continue; + } } // Fat-projections kill many registers which cannot be used to --- ./hotspot/src/share/vm/opto/ifnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/ifnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -820,6 +820,11 @@ static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff); +struct RangeCheck { + Node* ctl; + jint off; +}; + //------------------------------Ideal------------------------------------------ // Return a node which is more "ideal" than the current node. Strip out // control copies @@ -861,83 +866,141 @@ jint offset1; int flip1 = is_range_check(range1, index1, offset1); if( flip1 ) { - Node *first_prev_dom = NULL; - // Try to remove extra range checks. All 'up_one_dom' gives up at merges // so all checks we inspect post-dominate the top-most check we find. // If we are going to fail the current check and we reach the top check // then we are guaranteed to fail, so just start interpreting there. - // We 'expand' the top 2 range checks to include all post-dominating + // We 'expand' the top 3 range checks to include all post-dominating // checks. - // The top 2 range checks seen - Node *prev_chk1 = NULL; - Node *prev_chk2 = NULL; + // The top 3 range checks seen + const int NRC =3; + RangeCheck prev_checks[NRC]; + int nb_checks = 0; + // Low and high offsets seen so far jint off_lo = offset1; jint off_hi = offset1; - // Scan for the top 2 checks and collect range of offsets - for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit - if( dom->Opcode() == Op_If && // Not same opcode? - prev_dom->in(0) == dom ) { // One path of test does dominate? - if( dom == this ) return NULL; // dead loop + bool found_immediate_dominator = false; + + // Scan for the top checks and collect range of offsets + for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit + if (dom->Opcode() == Op_If && // Not same opcode? + prev_dom->in(0) == dom) { // One path of test does dominate? + if (dom == this) return NULL; // dead loop // See if this is a range check Node *index2, *range2; jint offset2; int flip2 = dom->as_If()->is_range_check(range2, index2, offset2); // See if this is a _matching_ range check, checking against // the same array bounds. - if( flip2 == flip1 && range2 == range1 && index2 == index1 && - dom->outcnt() == 2 ) { + if (flip2 == flip1 && range2 == range1 && index2 == index1 && + dom->outcnt() == 2) { + if (nb_checks == 0 && dom->in(1) == in(1)) { + // Found an immediately dominating test at the same offset. + // This kind of back-to-back test can be eliminated locally, + // and there is no need to search further for dominating tests. 
+ assert(offset2 == offset1, "Same test but different offsets"); + found_immediate_dominator = true; + break; + } // Gather expanded bounds off_lo = MIN2(off_lo,offset2); off_hi = MAX2(off_hi,offset2); - // Record top 2 range checks - prev_chk2 = prev_chk1; - prev_chk1 = prev_dom; - // If we match the test exactly, then the top test covers - // both our lower and upper bounds. - if( dom->in(1) == in(1) ) - prev_chk2 = prev_chk1; + // Record top NRC range checks + prev_checks[nb_checks%NRC].ctl = prev_dom; + prev_checks[nb_checks%NRC].off = offset2; + nb_checks++; } } prev_dom = dom; - dom = up_one_dom( dom ); - if( !dom ) break; + dom = up_one_dom(dom); + if (!dom) break; } + if (!found_immediate_dominator) { + // Attempt to widen the dominating range check to cover some later + // ones. Since range checks "fail" by uncommon-trapping to the + // interpreter, widening a check can make us speculatively enter + // the interpreter. If we see range-check deopt's, do not widen! + if (!phase->C->allow_range_check_smearing()) return NULL; - // Attempt to widen the dominating range check to cover some later - // ones. Since range checks "fail" by uncommon-trapping to the - // interpreter, widening a check can make us speculative enter the - // interpreter. If we see range-check deopt's, do not widen! - if (!phase->C->allow_range_check_smearing()) return NULL; - - // Constant indices only need to check the upper bound. - // Non-constance indices must check both low and high. - if( index1 ) { - // Didn't find 2 prior covering checks, so cannot remove anything. - if( !prev_chk2 ) return NULL; - // 'Widen' the offsets of the 1st and 2nd covering check - adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn ); - // Do not call adjust_check twice on the same projection - // as the first call may have transformed the BoolNode to a ConI - if( prev_chk1 != prev_chk2 ) { - adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn ); + // Didn't find prior covering check, so cannot remove anything. + if (nb_checks == 0) { + return NULL; } - // Test is now covered by prior checks, dominate it out - prev_dom = prev_chk2; - } else { - // Didn't find prior covering check, so cannot remove anything. - if( !prev_chk1 ) return NULL; - // 'Widen' the offset of the 1st and only covering check - adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn ); - // Test is now covered by prior checks, dominate it out - prev_dom = prev_chk1; + // Constant indices only need to check the upper bound. + // Non-constant indices must check both low and high. + int chk0 = (nb_checks - 1) % NRC; + if (index1) { + if (nb_checks == 1) { + return NULL; + } else { + // If the top range check's constant is the min or max of + // all constants we widen the next one to cover the whole + // range of constants. + RangeCheck rc0 = prev_checks[chk0]; + int chk1 = (nb_checks - 2) % NRC; + RangeCheck rc1 = prev_checks[chk1]; + if (rc0.off == off_lo) { + adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn); + prev_dom = rc1.ctl; + } else if (rc0.off == off_hi) { + adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn); + prev_dom = rc1.ctl; + } else { + // If the top test's constant is not the min or max of all + // constants, we need 3 range checks. We must leave the + // top test unchanged because widening it would allow the + // accesses it protects to successfully read/write out of + // bounds. 
+ if (nb_checks == 2) { + return NULL; + } + int chk2 = (nb_checks - 3) % NRC; + RangeCheck rc2 = prev_checks[chk2]; + // The top range check a+i covers interval: -a <= i < length-a + // The second range check b+i covers interval: -b <= i < length-b + if (rc1.off <= rc0.off) { + // if b <= a, we change the second range check to: + // -min_of_all_constants <= i < length-min_of_all_constants + // Together top and second range checks now cover: + // -min_of_all_constants <= i < length-a + // which is more restrictive than -b <= i < length-b: + // -b <= -min_of_all_constants <= i < length-a <= length-b + // The third check is then changed to: + // -max_of_all_constants <= i < length-max_of_all_constants + // so 2nd and 3rd checks restrict allowed values of i to: + // -min_of_all_constants <= i < length-max_of_all_constants + adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn); + adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn); + } else { + // if b > a, we change the second range check to: + // -max_of_all_constants <= i < length-max_of_all_constants + // Together top and second range checks now cover: + // -a <= i < length-max_of_all_constants + // which is more restrictive than -b <= i < length-b: + // -b < -a <= i < length-max_of_all_constants <= length-b + // The third check is then changed to: + // -max_of_all_constants <= i < length-max_of_all_constants + // so 2nd and 3rd checks restrict allowed values of i to: + // -min_of_all_constants <= i < length-max_of_all_constants + adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn); + adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn); + } + prev_dom = rc2.ctl; + } + } + } else { + RangeCheck rc0 = prev_checks[chk0]; + // 'Widen' the offset of the 1st and only covering check + adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn); + // Test is now covered by prior checks, dominate it out + prev_dom = rc0.ctl; + } } - } else { // Scan for an equivalent test Node *cmp; @@ -1019,7 +1082,7 @@ // for lower and upper bounds. ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) - prev_dom = idom; + prev_dom = idom; // Now walk the current IfNode's projections. // Loop ends when 'this' has no more uses. 
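The interval arithmetic spelled out in the comments above can be checked mechanically. A range check with constant offset off constrains the index to -off <= i < length - off; adjusting one dominating check to use the minimum offset and another to use the maximum offset constrains i to [-off_lo, length - off_hi), which lies inside every original interval. A standalone sketch (not HotSpot code; the offsets and length are made up) that verifies this containment:

#include <cassert>
#include <algorithm>

int main() {
  const int offs[] = { 2, 5, 3 };     // hypothetical constant offsets of the checks
  const int n = 3;
  const int length = 100;             // hypothetical array length

  int off_lo = offs[0], off_hi = offs[0];
  for (int k = 1; k < n; k++) {
    off_lo = std::min(off_lo, offs[k]);
    off_hi = std::max(off_hi, offs[k]);
  }

  // Any i that passes the two widened checks passes every original check.
  for (int i = -off_lo; i < length - off_hi; i++) {
    for (int k = 0; k < n; k++) {
      assert(0 <= i + offs[k] && i + offs[k] < length);
    }
  }
  return 0;
}

Widening is conservative: the adjusted checks can only reject more index values than the originals, and a rejected value merely deopts to the interpreter, which is exactly why the code above refuses to widen once range-check traps have already been recorded.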
--- ./hotspot/src/share/vm/opto/library_call.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/library_call.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -46,25 +46,28 @@ public: private: bool _is_virtual; - bool _is_predicted; bool _does_virtual_dispatch; + int8_t _predicates_count; // Intrinsic is predicated by several conditions + int8_t _last_predicate; // Last generated predicate vmIntrinsics::ID _intrinsic_id; public: - LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id) + LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id) : InlineCallGenerator(m), _is_virtual(is_virtual), - _is_predicted(is_predicted), _does_virtual_dispatch(does_virtual_dispatch), + _predicates_count((int8_t)predicates_count), + _last_predicate((int8_t)-1), _intrinsic_id(id) { } virtual bool is_intrinsic() const { return true; } virtual bool is_virtual() const { return _is_virtual; } - virtual bool is_predicted() const { return _is_predicted; } + virtual bool is_predicated() const { return _predicates_count > 0; } + virtual int predicates_count() const { return _predicates_count; } virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); - virtual Node* generate_predicate(JVMState* jvms); + virtual JVMState* generate(JVMState* jvms); + virtual Node* generate_predicate(JVMState* jvms, int predicate); vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; } }; @@ -107,8 +110,8 @@ vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); } ciMethod* callee() const { return _intrinsic->method(); } - bool try_to_inline(); - Node* try_to_predicate(); + bool try_to_inline(int predicate); + Node* try_to_predicate(int predicate); void push_result() { // Push the result onto the stack. 
@@ -307,10 +310,19 @@ Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object); + bool inline_sha_implCompress(vmIntrinsics::ID id); + bool inline_digestBase_implCompressMB(int predicate); + bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA, + bool long_state, address stubAddr, const char *stubName, + Node* src_start, Node* ofs, Node* limit); + Node* get_state_from_sha_object(Node *sha_object); + Node* get_state_from_sha5_object(Node *sha_object); + Node* inline_digestBase_implCompressMB_predicate(int predicate); bool inline_encodeISOArray(); bool inline_updateCRC32(); bool inline_updateBytesCRC32(); bool inline_updateByteBufferCRC32(); + bool inline_multiplyToLen(); }; @@ -319,8 +331,12 @@ vmIntrinsics::ID id = m->intrinsic_id(); assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); - if (DisableIntrinsic[0] != '\0' - && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) { + ccstr disable_intr = NULL; + + if ((DisableIntrinsic[0] != '\0' + && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) || + (method_has_option_value("DisableIntrinsic", disable_intr) + && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) { // disabled by a user request on the command line: // example: -XX:DisableIntrinsic=_hashCode,_getClass return NULL; @@ -367,7 +383,7 @@ } } - bool is_predicted = false; + int predicates = 0; bool does_virtual_dispatch = false; switch (id) { @@ -504,11 +520,32 @@ if (!UseAESIntrinsics) return NULL; break; + case vmIntrinsics::_multiplyToLen: + if (!UseMultiplyToLenIntrinsic) return NULL; + break; + case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt: case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt: if (!UseAESIntrinsics) return NULL; // these two require the predicated logic - is_predicted = true; + predicates = 1; + break; + + case vmIntrinsics::_sha_implCompress: + if (!UseSHA1Intrinsics) return NULL; + break; + + case vmIntrinsics::_sha2_implCompress: + if (!UseSHA256Intrinsics) return NULL; + break; + + case vmIntrinsics::_sha5_implCompress: + if (!UseSHA512Intrinsics) return NULL; + break; + + case vmIntrinsics::_digestBase_implCompressMB: + if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return NULL; + predicates = 3; break; case vmIntrinsics::_updateCRC32: @@ -577,7 +614,7 @@ if (!InlineUnsafeOps) return NULL; } - return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id); + return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id); } //----------------------register_library_intrinsics----------------------- @@ -586,7 +623,7 @@ // Nothing to do here. } -JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* LibraryIntrinsic::generate(JVMState* jvms) { LibraryCallKit kit(jvms, this); Compile* C = kit.C; int nodes = C->unique(); @@ -601,7 +638,7 @@ const int bci = kit.bci(); // Try to inline the intrinsic. - if (kit.try_to_inline()) { + if (kit.try_to_inline(_last_predicate)) { if (C->print_intrinsics() || C->print_inlining()) { C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? 
"(intrinsic, virtual)" : "(intrinsic)"); } @@ -634,12 +671,13 @@ return NULL; } -Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) { +Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { LibraryCallKit kit(jvms, this); Compile* C = kit.C; int nodes = C->unique(); + _last_predicate = predicate; #ifndef PRODUCT - assert(is_predicted(), "sanity"); + assert(is_predicated() && predicate < predicates_count(), "sanity"); if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { char buf[1000]; const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); @@ -649,10 +687,10 @@ ciMethod* callee = kit.callee(); const int bci = kit.bci(); - Node* slow_ctl = kit.try_to_predicate(); + Node* slow_ctl = kit.try_to_predicate(predicate); if (!kit.failing()) { if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); + C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)"); } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); if (C->log()) { @@ -681,7 +719,7 @@ return NULL; } -bool LibraryCallKit::try_to_inline() { +bool LibraryCallKit::try_to_inline(int predicate) { // Handle symbolic names for otherwise undistinguished boolean switches: const bool is_store = true; const bool is_native_ptr = true; @@ -875,6 +913,17 @@ case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt: return inline_cipherBlockChaining_AESCrypt(intrinsic_id()); + case vmIntrinsics::_sha_implCompress: + case vmIntrinsics::_sha2_implCompress: + case vmIntrinsics::_sha5_implCompress: + return inline_sha_implCompress(intrinsic_id()); + + case vmIntrinsics::_digestBase_implCompressMB: + return inline_digestBase_implCompressMB(predicate); + + case vmIntrinsics::_multiplyToLen: + return inline_multiplyToLen(); + case vmIntrinsics::_encodeISOArray: return inline_encodeISOArray(); @@ -898,7 +947,7 @@ } } -Node* LibraryCallKit::try_to_predicate() { +Node* LibraryCallKit::try_to_predicate(int predicate) { if (!jvms()->has_method()) { // Root JVMState has a null method. assert(map()->memory()->Opcode() == Op_Parm, ""); @@ -912,6 +961,8 @@ return inline_cipherBlockChaining_AESCrypt_predicate(false); case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt: return inline_cipherBlockChaining_AESCrypt_predicate(true); + case vmIntrinsics::_digestBase_implCompressMB: + return inline_digestBase_implCompressMB_predicate(predicate); default: // If you get here, it may be that someone has added a new intrinsic @@ -2611,7 +2662,8 @@ if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder); if (!is_store) { - Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile); + MemNode::MemOrd mo = is_volatile ? 
                                           MemNode::acquire : MemNode::unordered;
+    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -3346,7 +3398,7 @@
   if (region == NULL) never_see_null = true;
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
-  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3522,7 +3574,7 @@
   phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
   // If we fall through, it's a plain class. Get its _super.
   p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
-  kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+  kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
   null_ctl = top();
   kls = null_check_oop(kls, &null_ctl);
   if (null_ctl != top()) {
@@ -3604,7 +3656,7 @@
   args[which_arg] = arg;
   Node* p = basic_plus_adr(arg, class_klass_offset);
-  Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+  Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
   klasses[which_arg] = _gvn.transform(kls);
   }
@@ -5120,7 +5172,7 @@
   // (At this point we can assume disjoint_bases, since types differ.)
   int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
   Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-  Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
+  Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
   Node* dest_elem_klass = _gvn.transform(n1);
   Node* cv = generate_checkcast_arraycopy(adr_type,
                                           dest_elem_klass,
@@ -5695,6 +5747,108 @@
   return true;
 }
+//-------------inline_multiplyToLen-----------------------------------
+bool LibraryCallKit::inline_multiplyToLen() {
+  assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
+
+  address stubAddr = StubRoutines::multiplyToLen();
+  if (stubAddr == NULL) {
+    return false; // Intrinsic's stub is not implemented on this platform
+  }
+  const char* stubName = "multiplyToLen";
+
+  assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
+
+  Node* x = argument(1);
+  Node* xlen = argument(2);
+  Node* y = argument(3);
+  Node* ylen = argument(4);
+  Node* z = argument(5);
+
+  const Type* x_type = x->Value(&_gvn);
+  const Type* y_type = y->Value(&_gvn);
+  const TypeAryPtr* top_x = x_type->isa_aryptr();
+  const TypeAryPtr* top_y = y_type->isa_aryptr();
+  if (top_x == NULL || top_x->klass() == NULL ||
+      top_y == NULL || top_y->klass() == NULL) {
+    // failed array check
+    return false;
+  }
+
+  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  if (x_elem != T_INT || y_elem != T_INT) {
+    return false;
+  }
+
+  // Set the original stack and the reexecute bit for the interpreter to reexecute
+  // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
+  // on the return from z array allocation in runtime.
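For reference before the allocation logic below: the Java method this intrinsic replaces performs a word-by-word schoolbook multiplication of big-endian 32-bit magnitudes, and the platform stub computes the same result in assembly. A standalone sketch of that algorithm (not the HotSpot stub; a plain C++ rendering of what BigInteger.multiplyToLen computes, assuming z is freshly zeroed):

#include <cstdint>
#include <cstdio>
#include <vector>

// Multiply two magnitudes x and y (big-endian arrays of 32-bit words)
// into z of length xlen + ylen, as java.math.BigInteger does.
std::vector<uint32_t> multiply_to_len(const std::vector<uint32_t>& x,
                                      const std::vector<uint32_t>& y) {
  std::vector<uint32_t> z(x.size() + y.size(), 0);   // zlen = xlen + ylen
  for (int i = (int)x.size() - 1; i >= 0; i--) {
    uint64_t carry = 0;
    for (int j = (int)y.size() - 1, k = i + j + 1; j >= 0; j--, k--) {
      uint64_t product = (uint64_t)x[i] * y[j] + z[k] + carry;
      z[k] = (uint32_t)product;                      // low 32 bits stay in place
      carry = product >> 32;                         // high 32 bits carry left
    }
    z[i] = (uint32_t)carry;
  }
  return z;
}

int main() {
  std::vector<uint32_t> x(1, 0xFFFFFFFFu);           // 2^32 - 1
  std::vector<uint32_t> y(1, 0xFFFFFFFFu);           // 2^32 - 1
  std::vector<uint32_t> z = multiply_to_len(x, y);
  printf("%08x%08x\n", z[0], z[1]);                  // fffffffe00000001 = (2^32-1)^2
  return 0;
}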
+ { PreserveReexecuteState preexecs(this); + jvms()->set_should_reexecute(true); + + Node* x_start = array_element_address(x, intcon(0), x_elem); + Node* y_start = array_element_address(y, intcon(0), y_elem); + // 'x_start' points to x array + scaled xlen + // 'y_start' points to y array + scaled ylen + + // Allocate the result array + Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen)); + ciKlass* klass = ciTypeArrayKlass::make(T_INT); + Node* klass_node = makecon(TypeKlassPtr::make(klass)); + + IdealKit ideal(this); + +#define __ ideal. + Node* one = __ ConI(1); + Node* zero = __ ConI(0); + IdealVariable need_alloc(ideal), z_alloc(ideal); __ declarations_done(); + __ set(need_alloc, zero); + __ set(z_alloc, z); + __ if_then(z, BoolTest::eq, null()); { + __ increment (need_alloc, one); + } __ else_(); { + // Update graphKit memory and control from IdealKit. + sync_kit(ideal); + Node* zlen_arg = load_array_length(z); + // Update IdealKit memory and control from graphKit. + __ sync_kit(this); + __ if_then(zlen_arg, BoolTest::lt, zlen); { + __ increment (need_alloc, one); + } __ end_if(); + } __ end_if(); + + __ if_then(__ value(need_alloc), BoolTest::ne, zero); { + // Update graphKit memory and control from IdealKit. + sync_kit(ideal); + Node * narr = new_array(klass_node, zlen, 1); + // Update IdealKit memory and control from graphKit. + __ sync_kit(this); + __ set(z_alloc, narr); + } __ end_if(); + + sync_kit(ideal); + z = __ value(z_alloc); + // Can't use TypeAryPtr::INTS which uses Bottom offset. + _gvn.set_type(z, TypeOopPtr::make_from_klass(klass)); + // Final sync IdealKit and GraphKit. + final_sync(ideal); +#undef __ + + Node* z_start = array_element_address(z, intcon(0), T_INT); + + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, + OptoRuntime::multiplyToLen_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + x_start, xlen, y_start, ylen, z_start, zlen); + } // original reexecute is set back here + + C->set_has_split_ifs(true); // Has chance for split-if optimization + set_result(z); + return true; +} + + /** * Calculate CRC32 for byte. * int java.util.zip.CRC32.update(int crc, int b) @@ -5866,10 +6020,26 @@ BasicType bt = field->layout_type(); // Build the resultant type of the load - const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass()); - + const Type *type; + if (bt == T_OBJECT) { + type = TypeOopPtr::make_from_klass(field_klass->as_klass()); + } else { + type = Type::get_const_basic_type(bt); + } + + if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) { + insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier + } // Build the load. - Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol); + MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered; + Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol); + // If reference is volatile, prevent following memory ops from + // floating up past the volatile read. Also prevents commoning + // another volatile read. 
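The ordering rule stated in the comment above maps directly onto the C++11 acquire/release model: a Java volatile read must behave like an acquire load, so later memory operations cannot float above it, which is what the MemNode::acquire ordering plus the trailing MemBarAcquire enforce at the IR level. A standalone sketch of the same contract (not HotSpot code; ready plays the role of the volatile field):

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> ready(false);   // stands in for the volatile field
int payload = 0;                  // ordinary, non-volatile data

void producer() {
  payload = 42;                                   // ordinary store
  ready.store(true, std::memory_order_release);   // volatile-like store
}

void consumer() {
  // The acquire load acts like the MemBarAcquire the patch inserts:
  // the read of payload below cannot be hoisted above the load of ready.
  if (ready.load(std::memory_order_acquire)) {
    assert(payload == 42);        // guaranteed by release/acquire pairing
  }
}

int main() {
  std::thread t(producer);
  consumer();                     // either sees nothing, or sees payload == 42
  t.join();
  return 0;
}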
+  if (is_vol) {
+    // Memory barrier includes bogus read of value to force load BEFORE membar
+    insert_mem_bar(Op_MemBarAcquire, loadedField);
+  }
   return loadedField;
 }
@@ -5996,7 +6166,7 @@
   assert(tinst != NULL, "CBC obj is null");
   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
-  if (!klass_AESCrypt->is_loaded()) return false;
+  assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
@@ -6071,11 +6241,8 @@
 // note cipher==plain is more conservative than the original java code but that's OK
 //
 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
-  // First, check receiver for NULL since it is virtual method.
+  // The receiver was checked for NULL already.
   Node* objCBC = argument(0);
-  objCBC = null_check(objCBC);
-
-  if (stopped()) return NULL; // Always NULL

   // Load embeddedCipher field of CipherBlockChaining object.
   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
@@ -6122,3 +6289,258 @@
   record_for_igvn(region);
   return _gvn.transform(region);
 }
+
+//------------------------------inline_sha_implCompress-----------------------
+//
+// Calculate SHA (i.e., SHA-1) for single-block byte[] array.
+// void sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
+//
+// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
+// void sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
+//
+// Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
+// void sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
+//
+bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
+  assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
+
+  Node* sha_obj = argument(0);
+  Node* src = argument(1); // type oop
+  Node* ofs = argument(2); // type int
+
+  const Type* src_type = src->Value(&_gvn);
+  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  if (top_src == NULL || top_src->klass() == NULL) {
+    // failed array check
+    return false;
+  }
+  // Figure out the size and type of the elements we will be copying.
+ BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type(); + if (src_elem != T_BYTE) { + return false; + } + // 'src_start' points to src array + offset + Node* src_start = array_element_address(src, ofs, src_elem); + Node* state = NULL; + address stubAddr; + const char *stubName; + + switch(id) { + case vmIntrinsics::_sha_implCompress: + assert(UseSHA1Intrinsics, "need SHA1 instruction support"); + state = get_state_from_sha_object(sha_obj); + stubAddr = StubRoutines::sha1_implCompress(); + stubName = "sha1_implCompress"; + break; + case vmIntrinsics::_sha2_implCompress: + assert(UseSHA256Intrinsics, "need SHA256 instruction support"); + state = get_state_from_sha_object(sha_obj); + stubAddr = StubRoutines::sha256_implCompress(); + stubName = "sha256_implCompress"; + break; + case vmIntrinsics::_sha5_implCompress: + assert(UseSHA512Intrinsics, "need SHA512 instruction support"); + state = get_state_from_sha5_object(sha_obj); + stubAddr = StubRoutines::sha512_implCompress(); + stubName = "sha512_implCompress"; + break; + default: + fatal_unexpected_iid(id); + return false; + } + if (state == NULL) return false; + + // Call the stub. + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, state); + + return true; +} + +//------------------------------inline_digestBase_implCompressMB----------------------- +// +// Calculate SHA/SHA2/SHA5 for multi-block byte[] array. +// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) +// +bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) { + assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics, + "need SHA1/SHA256/SHA512 instruction support"); + assert((uint)predicate < 3, "sanity"); + assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters"); + + Node* digestBase_obj = argument(0); // The receiver was checked for NULL already. + Node* src = argument(1); // byte[] array + Node* ofs = argument(2); // type int + Node* limit = argument(3); // type int + + const Type* src_type = src->Value(&_gvn); + const TypeAryPtr* top_src = src_type->isa_aryptr(); + if (top_src == NULL || top_src->klass() == NULL) { + // failed array check + return false; + } + // Figure out the size and type of the elements we will be copying. 
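The switch above amounts to a small predicate-to-stub table: predicates 0, 1 and 2 correspond to the SHA, SHA2 and SHA5 provider classes, and only SHA5 keeps its hash state in a long[]. A standalone sketch of that mapping (not HotSpot code; the class and stub names are taken directly from the patch):

#include <cstdio>

struct StubInfo {
  const char* klass_name;   // class the predicate tests with instanceof
  const char* stub_name;    // stub called on the fast path
  bool        long_state;   // SHA-384/512 keep their state in a long[]
};

static StubInfo select_stub(int predicate) {
  switch (predicate) {
    case 0:  return { "sun/security/provider/SHA",  "sha1_implCompressMB",   false };
    case 1:  return { "sun/security/provider/SHA2", "sha256_implCompressMB", false };
    case 2:  return { "sun/security/provider/SHA5", "sha512_implCompressMB", true  };
    default: return { NULL, NULL, false };          // unknown predicate
  }
}

int main() {
  for (int predicate = 0; predicate < 3; predicate++) {
    StubInfo s = select_stub(predicate);
    printf("predicate %d: %s -> %s (%s state)\n", predicate,
           s.klass_name, s.stub_name, s.long_state ? "long[]" : "int[]");
  }
  return 0;
}

In the real code each entry is additionally gated on its UseSHA*Intrinsics flag, so a predicate whose stub is unavailable simply falls back to the Java path.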
+ BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type(); + if (src_elem != T_BYTE) { + return false; + } + // 'src_start' points to src array + offset + Node* src_start = array_element_address(src, ofs, src_elem); + + const char* klass_SHA_name = NULL; + const char* stub_name = NULL; + address stub_addr = NULL; + bool long_state = false; + + switch (predicate) { + case 0: + if (UseSHA1Intrinsics) { + klass_SHA_name = "sun/security/provider/SHA"; + stub_name = "sha1_implCompressMB"; + stub_addr = StubRoutines::sha1_implCompressMB(); + } + break; + case 1: + if (UseSHA256Intrinsics) { + klass_SHA_name = "sun/security/provider/SHA2"; + stub_name = "sha256_implCompressMB"; + stub_addr = StubRoutines::sha256_implCompressMB(); + } + break; + case 2: + if (UseSHA512Intrinsics) { + klass_SHA_name = "sun/security/provider/SHA5"; + stub_name = "sha512_implCompressMB"; + stub_addr = StubRoutines::sha512_implCompressMB(); + long_state = true; + } + break; + default: + fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate)); + } + if (klass_SHA_name != NULL) { + // get DigestBase klass to lookup for SHA klass + const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr(); + assert(tinst != NULL, "digestBase_obj is not instance???"); + assert(tinst->klass()->is_loaded(), "DigestBase is not loaded"); + + ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name)); + assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded"); + ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass(); + return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit); + } + return false; +} +//------------------------------inline_sha_implCompressMB----------------------- +bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA, + bool long_state, address stubAddr, const char *stubName, + Node* src_start, Node* ofs, Node* limit) { + const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA); + const TypeOopPtr* xtype = aklass->as_instance_type(); + Node* sha_obj = new (C) CheckCastPPNode(control(), digestBase_obj, xtype); + sha_obj = _gvn.transform(sha_obj); + + Node* state; + if (long_state) { + state = get_state_from_sha5_object(sha_obj); + } else { + state = get_state_from_sha_object(sha_obj); + } + if (state == NULL) return false; + + // Call the stub. 
+ Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, + OptoRuntime::digestBase_implCompressMB_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + src_start, state, ofs, limit); + // return ofs (int) + Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); + set_result(result); + + return true; +} + +//------------------------------get_state_from_sha_object----------------------- +Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) { + Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false); + assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2"); + if (sha_state == NULL) return (Node *) NULL; + + // now have the array, need to get the start address of the state array + Node* state = array_element_address(sha_state, intcon(0), T_INT); + return state; +} + +//------------------------------get_state_from_sha5_object----------------------- +Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) { + Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false); + assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5"); + if (sha_state == NULL) return (Node *) NULL; + + // now have the array, need to get the start address of the state array + Node* state = array_element_address(sha_state, intcon(0), T_LONG); + return state; +} + +//----------------------------inline_digestBase_implCompressMB_predicate---------------------------- +// Return node representing slow path of predicate check. +// the pseudo code we want to emulate with this predicate is: +// if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath +// +Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) { + assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics, + "need SHA1/SHA256/SHA512 instruction support"); + assert((uint)predicate < 3, "sanity"); + + // The receiver was checked for NULL already. 
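The pseudo code in the comment above is ordinary type dispatch: run the stub only when the receiver is one of the known provider classes, otherwise execute the Java implementation. A standalone sketch (not HotSpot code) using dynamic_cast as the C++ analogue of the generated instanceof guard:

#include <cstdio>

struct DigestBase { virtual ~DigestBase() {} };
struct SHA : DigestBase {};   // stands in for sun.security.provider.SHA

void do_intrinsic() { printf("fast path: call the compress stub\n"); }
void do_javapath()  { printf("slow path: run the Java implementation\n"); }

void impl_compress_mb(DigestBase* digestBaseObj) {
  // mirrors: if (digestBaseObj instanceof SHA) do_intrinsic, else do_javapath
  if (dynamic_cast<SHA*>(digestBaseObj) != NULL) {
    do_intrinsic();
  } else {
    do_javapath();
  }
}

int main() {
  SHA sha;
  DigestBase other;
  impl_compress_mb(&sha);     // takes the intrinsic path
  impl_compress_mb(&other);   // falls back to the Java path
  return 0;
}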
+ Node* digestBaseObj = argument(0); + + // get DigestBase klass for instanceOf check + const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr(); + assert(tinst != NULL, "digestBaseObj is null"); + assert(tinst->klass()->is_loaded(), "DigestBase is not loaded"); + + const char* klass_SHA_name = NULL; + switch (predicate) { + case 0: + if (UseSHA1Intrinsics) { + // we want to do an instanceof comparison against the SHA class + klass_SHA_name = "sun/security/provider/SHA"; + } + break; + case 1: + if (UseSHA256Intrinsics) { + // we want to do an instanceof comparison against the SHA2 class + klass_SHA_name = "sun/security/provider/SHA2"; + } + break; + case 2: + if (UseSHA512Intrinsics) { + // we want to do an instanceof comparison against the SHA5 class + klass_SHA_name = "sun/security/provider/SHA5"; + } + break; + default: + fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate)); + } + + ciKlass* klass_SHA = NULL; + if (klass_SHA_name != NULL) { + klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name)); + } + if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) { + // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path + Node* ctrl = control(); + set_control(top()); // no intrinsic path + return ctrl; + } + ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass(); + + Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA))); + Node* cmp_instof = _gvn.transform(new (C) CmpINode(instofSHA, intcon(1))); + Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne)); + Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + + return instof_false; // even if it is NULL +} --- ./hotspot/src/share/vm/opto/loopTransform.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/loopTransform.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -269,10 +269,9 @@ bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const { Node *test = ((IdealLoopTree*)this)->tail(); int body_size = ((IdealLoopTree*)this)->_body.size(); - int live_node_count = phase->C->live_nodes(); // Peeling does loop cloning which can result in O(N^2) node construction if( body_size > 255 /* Prevent overflow for large body_size */ - || (body_size * body_size + live_node_count > MaxNodeLimit) ) { + || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) { return false; // too large to safely clone } while( test != _head ) { // Scan till run off top of loop @@ -601,7 +600,7 @@ return false; if (new_body_size > unroll_limit || // Unrolling can result in a large amount of node construction - new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) { + new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) { return false; } @@ -882,6 +881,20 @@ return n; } +bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) { + Node* castii = new (C) CastIINode(incr, TypeInt::INT, true); + castii->set_req(0, ctrl); + register_new_node(castii, ctrl); + for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) { + Node* n = incr->fast_out(i); + if (n->is_Phi() && n->in(0) == loop) { + int nrep = n->replace_edge(incr, castii); + return true; + } + } + return false; +} + //------------------------------insert_pre_post_loops-------------------------- // Insert pre and post loops. If peel_only is set, the pre-loop can not have // more iterations added. 
It acts as a 'peel' only, no lower-bound RCE, no @@ -1080,6 +1093,24 @@ } } + // Nodes inside the loop may be control dependent on a predicate + // that was moved before the preloop. If the back branch of the main + // or post loops becomes dead, those nodes won't be dependent on the + // test that guards that loop nest anymore which could lead to an + // incorrect array access because it executes independently of the + // test that was guarding the loop nest. We add a special CastII on + // the if branch that enters the loop, between the input induction + // variable value and the induction variable Phi to preserve correct + // dependencies. + + // CastII for the post loop: + bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head); + assert(inserted, "no castII inserted"); + + // CastII for the main loop: + inserted = cast_incr_before_loop(pre_incr, min_taken, main_head); + assert(inserted, "no castII inserted"); + // Step B4: Shorten the pre-loop to run only 1 iteration (for now). // RCE and alignment may change this later. Node *cmp_end = pre_end->cmp_node(); @@ -2287,8 +2318,8 @@ // Skip next optimizations if running low on nodes. Note that // policy_unswitching and policy_maximally_unroll have this check. - uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes(); - if ((2 * _body.size()) > nodes_left) { + int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes(); + if ((int)(2 * _body.size()) > nodes_left) { return true; } --- ./hotspot/src/share/vm/opto/loopUnswitch.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/loopUnswitch.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -59,8 +59,8 @@ if (!_head->is_Loop()) { return false; } - uint nodes_left = MaxNodeLimit - phase->C->live_nodes(); - if (2 * _body.size() > nodes_left) { + int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes(); + if ((int)(2 * _body.size()) > nodes_left) { return false; // Too speculative if running low on nodes. } LoopNode* head = _head->as_Loop(); --- ./hotspot/src/share/vm/opto/loopnode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/loopnode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -602,6 +602,8 @@ return ctrl; } + bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop); + public: bool has_node( Node* n ) const { guarantee(n != NULL, "No Node."); --- ./hotspot/src/share/vm/opto/loopopts.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/loopopts.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -239,8 +239,13 @@ ProjNode* dp_proj = dp->as_Proj(); ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); if (exclude_loop_predicate && - unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) + (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) { + // If this is a range check (IfNode::is_range_check), do not + // reorder because Compile::allow_range_check_smearing might have + // changed the check. return; // Let IGVN transformation change control dependence. 
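An aside on the recurring max_node_limit() change in these hunks: the loop transforms clone the loop body, so each one first checks that roughly twice the body still fits in the per-compilation node budget, now queried per Compile rather than from the global MaxNodeLimit flag. A standalone sketch of that guard (not HotSpot code; Compile here is a made-up stand-in for the real class):

#include <cstdio>

struct Compile {                         // hypothetical stand-in
  int _live_nodes;
  int _max_node_limit;
  int live_nodes()     const { return _live_nodes; }
  int max_node_limit() const { return _max_node_limit; }
};

bool may_clone_body(const Compile& C, int body_size) {
  int nodes_left = C.max_node_limit() - C.live_nodes();
  return 2 * body_size <= nodes_left;    // cloning roughly doubles the body
}

int main() {
  Compile C = { 70000, 80000 };
  printf("body 4000: %s\n", may_clone_body(C, 4000) ? "clone" : "too speculative");
  printf("body 6000: %s\n", may_clone_body(C, 6000) ? "clone" : "too speculative");
  return 0;
}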
+ } IdealLoopTree *old_loop = get_loop(dp); @@ -734,7 +739,7 @@ for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { weight += region->fast_out(i)->outcnt(); } - int nodes_left = MaxNodeLimit - C->live_nodes(); + int nodes_left = C->max_node_limit() - C->live_nodes(); if (weight * 8 > nodes_left) { #ifndef PRODUCT if (PrintOpto) @@ -896,23 +901,23 @@ int n_op = n->Opcode(); // Check for an IF being dominated by another IF same test - if( n_op == Op_If ) { + if (n_op == Op_If) { Node *bol = n->in(1); uint max = bol->outcnt(); // Check for same test used more than once? - if( n_op == Op_If && max > 1 && bol->is_Bool() ) { + if (max > 1 && bol->is_Bool()) { // Search up IDOMs to see if this IF is dominated. Node *cutoff = get_ctrl(bol); // Now search up IDOMs till cutoff, looking for a dominating test Node *prevdom = n; Node *dom = idom(prevdom); - while( dom != cutoff ) { - if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) { + while (dom != cutoff) { + if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) { // Replace the dominated test with an obvious true or false. // Place it on the IGVN worklist for later cleanup. C->set_major_progress(); - dominated_by( prevdom, n, false, true ); + dominated_by(prevdom, n, false, true); #ifndef PRODUCT if( VerifyLoopOptimizations ) verify(); #endif --- ./hotspot/src/share/vm/opto/machnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/machnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -639,7 +639,6 @@ } #endif - bool MachCallNode::return_value_is_used() const { if (tf()->range()->cnt() == TypeFunc::Parms) { // void return @@ -657,6 +656,14 @@ return false; } +// Similar to cousin class CallNode::returns_pointer +// Because this is used in deoptimization, we want the type info, not the data +// flow info; the interpreter will "use" things that are dead to the optimizer. +bool MachCallNode::returns_pointer() const { + const TypeTuple *r = tf()->range(); + return (r->cnt() > TypeFunc::Parms && + r->field_at(TypeFunc::Parms)->isa_ptr()); +} //------------------------------Registers-------------------------------------- const RegMask &MachCallNode::in_RegMask(uint idx) const { --- ./hotspot/src/share/vm/opto/machnode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/machnode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -784,6 +784,10 @@ bool returns_long() const { return tf()->return_type() == T_LONG; } bool return_value_is_used() const; + + // Similar to cousin class CallNode::returns_pointer + bool returns_pointer() const; + #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; #endif --- ./hotspot/src/share/vm/opto/macro.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/macro.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -699,6 +699,7 @@ ciType* elem_type; Node* res = alloc->result_cast(); + assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result"); const TypeOopPtr* res_type = NULL; if (res != NULL) { // Could be NULL when there are no users res_type = _igvn.type(res)->isa_oopptr(); @@ -963,7 +964,11 @@ } bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { - if (!EliminateAllocations || !alloc->_is_non_escaping) { + // Don't do scalar replacement if the frame can be popped by JVMTI: + // if reallocation fails during deoptimization we'll pop all + // interpreter frames for this compiled frame and that won't play + // nice with JVMTI popframe. + if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) { return false; } Node* klass = alloc->in(AllocateNode::KlassNode); @@ -1031,6 +1036,8 @@ return false; } + assert(boxing->result_cast() == NULL, "unexpected boxing node result"); + extract_call_projections(boxing); const TypeTuple* r = boxing->tf()->range(); @@ -2191,7 +2198,7 @@ Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn); if (klass_node == NULL) { Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); - klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) ); + klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr())); #ifdef _LP64 if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) { assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity"); --- ./hotspot/src/share/vm/opto/memnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/memnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -859,6 +859,10 @@ //============================================================================= +// Should LoadNode::Ideal() attempt to remove control edges? +bool LoadNode::can_remove_control() const { + return true; +} uint LoadNode::size_of() const { return sizeof(*this); } uint LoadNode::cmp( const Node &n ) const { return !Type::cmp( _type, ((LoadNode&)n)._type ); } @@ -1251,6 +1255,16 @@ result = new (phase->C) ConvI2LNode(phase->transform(result)); } #endif + // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair). + // Need to preserve unboxing load type if it is unsigned. + switch(this->Opcode()) { + case Op_LoadUB: + result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFF)); + break; + case Op_LoadUS: + result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFFFF)); + break; + } return result; } } @@ -1455,7 +1469,7 @@ } //------------------------------Ideal------------------------------------------ -// If the load is from Field memory and the pointer is non-null, we can +// If the load is from Field memory and the pointer is non-null, it might be possible to // zero out the control input. // If the offset is constant and the base is an object allocation, // try to hook me up to the exact initializing store. 
@@ -1480,6 +1494,7 @@ && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) { // Check for useless control edge in some common special cases if (in(MemNode::Control) != NULL + && can_remove_control() && phase->type(base)->higher_equal(TypePtr::NOTNULL) && all_controls_dominate(base, phase->C->start())) { // A method-invariant, non-null address (constant or 'this' argument). @@ -2007,9 +2022,8 @@ //============================================================================= //----------------------------LoadKlassNode::make------------------------------ // Polymorphic factory method: -Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) { +Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk) { Compile* C = gvn.C; - Node *ctl = NULL; // sanity check the alias category against the created node type const TypePtr *adr_type = adr->bottom_type()->isa_ptr(); assert(adr_type != NULL, "expecting TypeKlassPtr"); @@ -2029,6 +2043,12 @@ return klass_value_common(phase); } +// In most cases, LoadKlassNode does not have the control input set. If the control +// input is set, it must not be removed (by LoadNode::Ideal()). +bool LoadKlassNode::can_remove_control() const { + return false; +} + const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const { // Either input is TOP ==> the result is TOP const Type *t1 = phase->type( in(MemNode::Memory) ); --- ./hotspot/src/share/vm/opto/memnode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/memnode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -148,6 +148,8 @@ protected: virtual uint cmp(const Node &n) const; virtual uint size_of() const; // Size is bigger + // Should LoadNode::Ideal() attempt to remove control edges? + virtual bool can_remove_control() const; const Type* const _type; // What kind of value is loaded? public: @@ -171,8 +173,10 @@ // we are equivalent to. We look for Load of a Store. virtual Node *Identity( PhaseTransform *phase ); - // If the load is from Field memory and the pointer is non-null, we can + // If the load is from Field memory and the pointer is non-null, it might be possible to // zero out the control input. + // If the offset is constant and the base is an object allocation, + // try to hook me up to the exact initializing store. virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); // Split instance field load through Phi. @@ -413,6 +417,10 @@ //------------------------------LoadKlassNode---------------------------------- // Load a Klass from an object class LoadKlassNode : public LoadPNode { +protected: + // In most cases, LoadKlassNode does not have the control input set. If the control + // input is set, it must not be removed (by LoadNode::Ideal()). 
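The can_remove_control() additions here are a classic virtual hook: the base class keeps its old behavior (a useless control edge on a load may be dropped) and LoadKlassNode opts out, because the patch now sometimes sets a control edge on a LoadKlass deliberately (see the parseHelper.cpp change further down). A standalone sketch of the pattern (not HotSpot code):

#include <cstdio>

struct LoadNode {
  virtual ~LoadNode() {}
  // Should ideal() be allowed to drop this node's control edge?
  virtual bool can_remove_control() const { return true; }
  void ideal() {
    if (can_remove_control()) {
      printf("removing useless control edge\n");
    } else {
      printf("keeping control edge\n");
    }
  }
};

struct LoadKlassNode : LoadNode {
  // If a LoadKlass has a control input at all, it was set on purpose
  // (it guards a heroic, uncommon-trap based assumption), so keep it.
  virtual bool can_remove_control() const { return false; }
};

int main() {
  LoadNode load;
  LoadKlassNode load_klass;
  load.ideal();        // removing useless control edge
  load_klass.ideal();  // keeping control edge
  return 0;
}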
+ virtual bool can_remove_control() const; public: LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo) : LoadPNode(c, mem, adr, at, tk, mo) {} @@ -422,8 +430,8 @@ virtual bool depends_only_on_test() const { return true; } // Polymorphic factory method: - static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, - const TypeKlassPtr *tk = TypeKlassPtr::OBJECT ); + static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, + const TypeKlassPtr* tk = TypeKlassPtr::OBJECT); }; //------------------------------LoadNKlassNode--------------------------------- --- ./hotspot/src/share/vm/opto/multnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/multnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -194,7 +194,9 @@ } } - ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj(); + ProjNode* other_proj = iff->proj_out(1-_con); + if (other_proj == NULL) // Should never happen, but make Parfait happy. + return false; if (other_proj->is_uncommon_trap_proj(reason)) { assert(reason == Deoptimization::Reason_none || Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); --- ./hotspot/src/share/vm/opto/node.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/node.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,7 +69,7 @@ Compile::set_debug_idx(new_debug_idx); set_debug_idx( new_debug_idx ); assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX"); - assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded limit"); + assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit"); if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) { tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx); BREAKPOINT; @@ -326,7 +326,7 @@ Node::Node(uint req) : _idx(IDX_INIT(req)) { - assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" ); + assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" ); debug_only( verify_construction() ); NOT_PRODUCT(nodes_created++); if (req == 0) { @@ -527,6 +527,9 @@ if (n->is_Call()) { n->as_Call()->clone_jvms(C); } + if (n->is_SafePoint()) { + n->as_SafePoint()->clone_replaced_nodes(); + } return n; // Return the clone } @@ -622,6 +625,9 @@ if (is_expensive()) { compile->remove_expensive_node(this); } + if (is_SafePoint()) { + as_SafePoint()->delete_replaced_nodes(); + } #ifdef ASSERT // We will not actually delete the storage, but we'll make the node unusable. *(address*)this = badAddress; // smash the C++ vtbl, probably @@ -1087,6 +1093,9 @@ if( this->is_Store() ) { // Condition for back-to-back stores folding. 
return n->Opcode() == op && n->in(MemNode::Memory) == this; + } else if (this->is_Load()) { + // Condition for removing an unused LoadNode from the MemBarAcquire precedence input + return n->Opcode() == Op_MemBarAcquire; } else if( op == Op_AddL ) { // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y)) return n->Opcode() == Op_ConvL2I && n->in(1) == this; --- ./hotspot/src/share/vm/opto/output.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/output.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -783,9 +783,10 @@ // grow downwards in all implementations. // (If, on some machine, the interpreter's Java locals or stack // were to grow upwards, the embedded doubles would be word-swapped.) - jint *dp = (jint*)&d; - array->append(new ConstantIntValue(dp[1])); - array->append(new ConstantIntValue(dp[0])); + jlong_accessor acc; + acc.long_value = jlong_cast(d); + array->append(new ConstantIntValue(acc.words[1])); + array->append(new ConstantIntValue(acc.words[0])); #endif break; } @@ -802,9 +803,10 @@ // grow downwards in all implementations. // (If, on some machine, the interpreter's Java locals or stack // were to grow upwards, the embedded doubles would be word-swapped.) - jint *dp = (jint*)&d; - array->append(new ConstantIntValue(dp[1])); - array->append(new ConstantIntValue(dp[0])); + jlong_accessor acc; + acc.long_value = d; + array->append(new ConstantIntValue(acc.words[1])); + array->append(new ConstantIntValue(acc.words[0])); #endif break; } @@ -854,8 +856,7 @@ } // Check if a call returns an object. - if (mcall->return_value_is_used() && - mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) { + if (mcall->returns_pointer()) { return_oop = true; } safepoint_pc_offset += mcall->ret_addr_offset(); --- ./hotspot/src/share/vm/opto/parse.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/parse.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -142,7 +142,7 @@ void print_value_on(outputStream* st) const PRODUCT_RETURN; - bool _forced_inline; // Inlining was forced by CompilerOracle or ciReplay + bool _forced_inline; // Inlining was forced by CompilerOracle, ciReplay or annotation bool forced_inline() const { return _forced_inline; } // Count number of nodes in this subtree int count() const; @@ -357,12 +357,13 @@ int _est_switch_depth; // Debugging SwitchRanges. #endif - // parser for the caller of the method of this object - Parse* const _parent; + bool _first_return; // true if return is the first to be parsed + bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths? + uint _new_idx; // any node with _idx above were new during this parsing. Used to trim the replaced nodes list. public: // Constructor - Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent); + Parse(JVMState* caller, ciMethod* parse_method, float expected_uses); virtual Parse* is_Parse() const { return (Parse*)this; } @@ -419,8 +420,6 @@ return block()->successor_for_bci(bci); } - Parse* parent_parser() const { return _parent; } - private: // Create a JVMS & map for the initial state of this method. 
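The output.cpp change above swaps a raw pointer cast for HotSpot's jlong_accessor union when splitting a 64-bit constant into two 32-bit scope-descriptor words. Reading a double through a jint* violates strict aliasing; going through an integer copy and a union keeps the word extraction well defined on the compilers HotSpot supports. A standalone sketch (not HotSpot code; jlong_accessor_sketch is a hypothetical mirror of the real union's shape):

#include <cstdio>
#include <cstring>
#include <stdint.h>

union jlong_accessor_sketch {      // mirrors jlong_accessor: one long, two words
  int64_t long_value;
  int32_t words[2];
};

int main() {
  double d = 3.14;
  int64_t bits;
  memcpy(&bits, &d, sizeof bits);  // the jlong_cast step, fully defined via memcpy

  jlong_accessor_sketch acc;
  acc.long_value = bits;
  // The scope descriptor appends the two halves as words[1] then words[0];
  // which half is the high word depends on the host's endianness.
  printf("words: %08x %08x\n", (unsigned)acc.words[1], (unsigned)acc.words[0]);
  return 0;
}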
SafePointNode* create_entry_map(); @@ -552,8 +551,9 @@ float dynamic_branch_prediction(float &cnt); float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci); - bool seems_never_taken(float prob); - bool seems_stable_comparison(BoolTest::mask btest, Node* c); + bool seems_never_taken(float prob) const; + bool path_is_suitable_for_uncommon_trap(float prob) const; + bool seems_stable_comparison() const; void do_ifnull(BoolTest::mask btest, Node* c); void do_if(BoolTest::mask btest, Node* c); --- ./hotspot/src/share/vm/opto/parse1.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/parse1.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -381,8 +381,8 @@ //------------------------------Parse------------------------------------------ // Main parser constructor. -Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent) - : _exits(caller), _parent(parent) +Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) + : _exits(caller) { // Init some variables _caller = caller; @@ -395,6 +395,9 @@ _entry_bci = InvocationEntryBci; _tf = NULL; _block = NULL; + _first_return = true; + _replaced_nodes_for_exceptions = false; + _new_idx = C->unique(); debug_only(_block_count = -1); debug_only(_blocks = (Block*)-1); #ifndef PRODUCT @@ -565,12 +568,13 @@ set_map(entry_map); do_method_entry(); } - if (depth() == 1) { + + if (depth() == 1 && !failing()) { // Add check to deoptimize the nmethod if RTM state was changed rtm_deopt(); } - // Check for bailouts during method entry. + // Check for bailouts during method entry or RTM state check setup. if (failing()) { if (log) log->done("parse"); C->set_default_node_notes(caller_nn); @@ -894,6 +898,10 @@ for (uint i = 0; i < TypeFunc::Parms; i++) { caller.map()->set_req(i, ex_map->in(i)); } + if (ex_map->has_replaced_nodes()) { + _replaced_nodes_for_exceptions = true; + } + caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx); // ...and the exception: Node* ex_oop = saved_ex_oop(ex_map); SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop); @@ -962,7 +970,7 @@ bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode; // record exit from a method if compiled while Dtrace is turned on. - if (do_synch || C->env()->dtrace_method_probes()) { + if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) { // First move the exception list out of _exits: GraphKit kit(_exits.transfer_exceptions_into_jvms()); SafePointNode* normal_map = kit.map(); // keep this guy safe @@ -987,6 +995,9 @@ if (C->env()->dtrace_method_probes()) { kit.make_dtrace_method_exit(method()); } + if (_replaced_nodes_for_exceptions) { + kit.map()->apply_replaced_nodes(); + } // Done with exception-path processing. ex_map = kit.make_exception_state(ex_oop); assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity"); @@ -1006,6 +1017,7 @@ _exits.add_exception_state(ex_map); } } + _exits.map()->apply_replaced_nodes(); } //-----------------------------create_entry_map------------------------------- @@ -1020,6 +1032,9 @@ return NULL; } + // clear current replaced nodes that are of no use from here on (map was cloned in build_exits). + _caller->map()->delete_replaced_nodes(); + // If this is an inlined method, we may have to do a receiver null check. 
if (_caller->has_method() && is_normal_parse() && !method()->is_static()) { GraphKit kit(_caller); @@ -1043,6 +1058,8 @@ SafePointNode* inmap = _caller->map(); assert(inmap != NULL, "must have inmap"); + // In case of null check on receiver above + map()->transfer_replaced_nodes_from(inmap, _new_idx); uint i; @@ -1672,6 +1689,8 @@ set_control(r->nonnull_req()); } + map()->merge_replaced_nodes_with(newin); + // newin has been subsumed into the lazy merge, and is now dead. set_block(save_block); @@ -1939,7 +1958,7 @@ // finalization. In general this will fold up since the concrete // class is often visible so the access flags are constant. Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); - Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) ); + Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset())); Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered); @@ -2076,6 +2095,13 @@ phi->add_req(value); } + if (_first_return) { + _exits.map()->transfer_replaced_nodes_from(map(), _new_idx); + _first_return = false; + } else { + _exits.map()->merge_replaced_nodes_with(map()); + } + stop_and_kill_map(); // This CFG path dies here } --- ./hotspot/src/share/vm/opto/parse2.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/parse2.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -884,7 +884,7 @@ // some branches (e.g., _213_javac.Assembler.eliminate) validly produce // very small but nonzero probabilities, which if confused with zero // counts would keep the program recompiling indefinitely. -bool Parse::seems_never_taken(float prob) { +bool Parse::seems_never_taken(float prob) const { return prob < PROB_MIN; } @@ -895,53 +895,12 @@ // if a path is never taken, its controlling comparison is // already acting in a stable fashion. If the comparison // seems stable, we will put an expensive uncommon trap -// on the untaken path. To be conservative, and to allow -// partially executed counted loops to be compiled fully, -// we will plant uncommon traps only after pointer comparisons. -bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) { - for (int depth = 4; depth > 0; depth--) { - // The following switch can find CmpP here over half the time for - // dynamic language code rich with type tests. - // Code using counted loops or array manipulations (typical - // of benchmarks) will have many (>80%) CmpI instructions. - switch (cmp->Opcode()) { - case Op_CmpP: - // A never-taken null check looks like CmpP/BoolTest::eq. - // These certainly should be closed off as uncommon traps. - if (btest == BoolTest::eq) - return true; - // A never-failed type check looks like CmpP/BoolTest::ne. - // Let's put traps on those, too, so that we don't have to compile - // unused paths with indeterminate dynamic type information. - if (ProfileDynamicTypes) - return true; - return false; - - case Op_CmpI: - // A small minority (< 10%) of CmpP are masked as CmpI, - // as if by boolean conversion ((p == q? 1: 0) != 0). - // Detect that here, even if it hasn't optimized away yet. - // Specifically, this covers the 'instanceof' operator. 
- if (btest == BoolTest::ne || btest == BoolTest::eq) { - if (_gvn.type(cmp->in(2))->singleton() && - cmp->in(1)->is_Phi()) { - PhiNode* phi = cmp->in(1)->as_Phi(); - int true_path = phi->is_diamond_phi(); - if (true_path > 0 && - _gvn.type(phi->in(1))->singleton() && - _gvn.type(phi->in(2))->singleton()) { - // phi->region->if_proj->ifnode->bool->cmp - BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool(); - btest = bol->_test._test; - cmp = bol->in(1); - continue; - } - } - } - return false; - } +// on the untaken path. +bool Parse::seems_stable_comparison() const { + if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { + return false; } - return false; + return true; } //-------------------------------repush_if_args-------------------------------- @@ -1166,6 +1125,14 @@ } } +bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { + // Don't want to speculate on uncommon traps when running with -Xcomp + if (!UseInterpreter) { + return false; + } + return (seems_never_taken(prob) && seems_stable_comparison()); +} + //----------------------------adjust_map_after_if------------------------------ // Adjust the JVM state to reflect the result of taking this path. // Basically, it means inspecting the CmpNode controlling this @@ -1179,33 +1146,9 @@ bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); - if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) { - // If this might possibly turn into an implicit null check, - // and the null has never yet been seen, we need to generate - // an uncommon trap, so as to recompile instead of suffering - // with very slow branches. (We'll get the slow branches if - // the program ever changes phase and starts seeing nulls here.) - // - // We do not inspect for a null constant, since a node may - // optimize to 'null' later on. - // - // Null checks, and other tests which expect inequality, - // show btest == BoolTest::eq along the non-taken branch. - // On the other hand, type tests, must-be-null tests, - // and other tests which expect pointer equality, - // show btest == BoolTest::ne along the non-taken branch. - // We prune both types of branches if they look unused. + if (path_is_suitable_for_uncommon_trap(prob)) { repush_if_args(); - // We need to mark this branch as taken so that if we recompile we will - // see that it is possible. In the tiered system the interpreter doesn't - // do profiling and by the time we get to the lower tier from the interpreter - // the path may be cold again. Make sure it doesn't look untaken - if (is_fallthrough) { - profile_not_taken_branch(!ProfileInterpreter); - } else { - profile_taken_branch(iter().get_dest(), !ProfileInterpreter); - } - uncommon_trap(Deoptimization::Reason_unreached, + uncommon_trap(Deoptimization::Reason_unstable_if, Deoptimization::Action_reinterpret, NULL, (is_fallthrough ? 
"taken always" : "taken never")); --- ./hotspot/src/share/vm/opto/parseHelper.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/parseHelper.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -156,22 +156,43 @@ int klass_offset = oopDesc::klass_offset_in_bytes(); Node* p = basic_plus_adr( ary, ary, klass_offset ); // p's type is array-of-OOPS plus klass_offset - Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) ); + Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS)); // Get the array klass const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr(); - // array_klass's type is generally INexact array-of-oop. Heroically - // cast the array klass to EXACT array and uncommon-trap if the cast - // fails. + // The type of array_klass is usually INexact array-of-oop. Heroically + // cast array_klass to EXACT array and uncommon-trap if the cast fails. + // Make constant out of the inexact array klass, but use it only if the cast + // succeeds. bool always_see_exact_class = false; if (MonomorphicArrayCheck - && !too_many_traps(Deoptimization::Reason_array_check)) { + && !too_many_traps(Deoptimization::Reason_array_check) + && !tak->klass_is_exact() + && tak != TypeKlassPtr::OBJECT) { + // Regarding the fourth condition in the if-statement from above: + // + // If the compiler has determined that the type of array 'ary' (represented + // by 'array_klass') is java/lang/Object, the compiler must not assume that + // the array 'ary' is monomorphic. + // + // If 'ary' were of type java/lang/Object, this arraystore would have to fail, + // because it is not possible to perform a arraystore into an object that is not + // a "proper" array. + // + // Therefore, let's obtain at runtime the type of 'ary' and check if we can still + // successfully perform the store. + // + // The implementation reasons for the condition are the following: + // + // java/lang/Object is the superclass of all arrays, but it is represented by the VM + // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect + // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses. + // + // See issue JDK-8057622 for details. + always_see_exact_class = true; // (If no MDO at all, hope for the best, until a trap actually occurs.) - } - // Is the array klass is exactly its defined type? - if (always_see_exact_class && !tak->klass_is_exact()) { // Make a constant out of the inexact array klass const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr(); Node* con = makecon(extak); @@ -202,11 +223,15 @@ // Extract the array element class int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset()); Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset); - Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) ); + // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true, + // we must set a control edge from the IfTrue node created by the uncommon_trap above to the + // LoadKlassNode. + Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL, + immutable_memory(), p2, tak)); // Check (the hard way) and throw if not a subklass. // Result is ignored, we just need the CFG effects. 
- gen_checkcast( obj, a_e_klass ); + gen_checkcast(obj, a_e_klass); } --- ./hotspot/src/share/vm/opto/phaseX.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/phaseX.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1340,15 +1340,27 @@ } } - if( use->is_Cmp() ) { // Enable CMP/BOOL optimization + uint use_op = use->Opcode(); + if(use->is_Cmp()) { // Enable CMP/BOOL optimization add_users_to_worklist(use); // Put Bool on worklist - // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the - // phi merging either 0 or 1 onto the worklist if (use->outcnt() > 0) { Node* bol = use->raw_out(0); if (bol->outcnt() > 0) { Node* iff = bol->raw_out(0); - if (iff->outcnt() == 2) { + if (use_op == Op_CmpI && + iff->is_CountedLoopEnd()) { + CountedLoopEndNode* cle = iff->as_CountedLoopEnd(); + if (cle->limit() == n && cle->phi() != NULL) { + // If an opaque node feeds into the limit condition of a + // CountedLoop, we need to process the Phi node for the + // induction variable when the opaque node is removed: + // the range of values taken by the Phi is now known and + // so its type is also known. + _worklist.push(cle->phi()); + } + } else if (iff->outcnt() == 2) { + // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the + // phi merging either 0 or 1 onto the worklist Node* ifproj0 = iff->raw_out(0); Node* ifproj1 = iff->raw_out(1); if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) { @@ -1360,9 +1372,26 @@ } } } + if (use_op == Op_CmpI) { + Node* in1 = use->in(1); + for (uint i = 0; i < in1->outcnt(); i++) { + if (in1->raw_out(i)->Opcode() == Op_CastII) { + Node* castii = in1->raw_out(i); + if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) { + Node* ifnode = castii->in(0)->in(0); + if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) { + // Reprocess a CastII node that may depend on an + // opaque node value when the opaque node is + // removed. In case it carries a dependency we can do + // a better job of computing its type. + _worklist.push(castii); + } + } + } + } + } } - uint use_op = use->Opcode(); // If changed Cast input, check Phi users for simple cycles if( use->is_ConstraintCast() || use->is_CheckCastPP() ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/opto/replacednodes.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "opto/cfgnode.hpp" +#include "opto/phaseX.hpp" +#include "opto/replacednodes.hpp" + +void ReplacedNodes::allocate_if_necessary() { + if (_replaced_nodes == NULL) { + _replaced_nodes = new GrowableArray<ReplacedNode>(); + } +} + +bool ReplacedNodes::is_empty() const { + return _replaced_nodes == NULL || _replaced_nodes->length() == 0; +} + +bool ReplacedNodes::has_node(const ReplacedNode& r) const { + return _replaced_nodes->find(r) != -1; +} + +bool ReplacedNodes::has_target_node(Node* n) const { + for (int i = 0; i < _replaced_nodes->length(); i++) { + if (_replaced_nodes->at(i).improved() == n) { + return true; + } + } + return false; +} + +// Record replaced node if not seen before +void ReplacedNodes::record(Node* initial, Node* improved) { + allocate_if_necessary(); + ReplacedNode r(initial, improved); + if (!has_node(r)) { + _replaced_nodes->push(r); + } +} + +// Copy replaced nodes from one map to another. idx is used to +// identify nodes that are too new to be of interest in the target +// node list. +void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) { + if (other.is_empty()) { + return; + } + allocate_if_necessary(); + for (int i = 0; i < other._replaced_nodes->length(); i++) { + ReplacedNode replaced = other._replaced_nodes->at(i); + // Only transfer the nodes that can actually be useful + if (!has_node(replaced) && (replaced.initial()->_idx < idx || has_target_node(replaced.initial()))) { + _replaced_nodes->push(replaced); + } + } +} + +void ReplacedNodes::clone() { + if (_replaced_nodes != NULL) { + GrowableArray<ReplacedNode>* replaced_nodes_clone = new GrowableArray<ReplacedNode>(); + replaced_nodes_clone->appendAll(_replaced_nodes); + _replaced_nodes = replaced_nodes_clone; + } +} + +void ReplacedNodes::reset() { + if (_replaced_nodes != NULL) { + _replaced_nodes->clear(); + } +} + +// Perform node replacement (used when returning to caller) +void ReplacedNodes::apply(Node* n) { + if (is_empty()) { + return; + } + for (int i = 0; i < _replaced_nodes->length(); i++) { + ReplacedNode replaced = _replaced_nodes->at(i); + n->replace_edge(replaced.initial(), replaced.improved()); + } +} + +static void enqueue_use(Node* n, Node* use, Unique_Node_List& work) { + if (use->is_Phi()) { + Node* r = use->in(0); + assert(r->is_Region(), "Phi should have Region"); + for (uint i = 1; i < use->req(); i++) { + if (use->in(i) == n) { + work.push(r->in(i)); + } + } + } else { + work.push(use); + } +} + +// Perform node replacement following late inlining +void ReplacedNodes::apply(Compile* C, Node* ctl) { + // ctl is the control on exit of the method that was late inlined + if (is_empty()) { + return; + } + for (int i = 0; i < _replaced_nodes->length(); i++) { + ReplacedNode replaced = _replaced_nodes->at(i); + Node* initial = replaced.initial(); + Node* improved = replaced.improved(); + assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control"); + + ResourceMark rm; + Unique_Node_List work; + // Go over all the uses of the node that is considered for replacement... + for (DUIterator j = initial->outs(); initial->has_out(j); j++) { + Node* use = initial->out(j); + + if (use == improved || use->outcnt() == 0) { + continue; + } + work.clear(); + enqueue_use(initial, use, work); + bool replace = true; + // Check that this use is dominated by ctl. Go ahead with the + // replacement if it is.
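The dominance check that follows (a walk via up_one_dom() with a depth cutoff) can be sketched standalone; Node here is an assumed minimal stand-in that records only its immediate dominator:

#include <cstdio>

struct Node {
  Node* idom; // immediate dominator; NULL at the root
};

// Accept the replacement only if 'ctl' dominates 'n' and is reachable
// within max_depth steps, mirroring the cutoff in ReplacedNodes::apply().
bool dominated_by(Node* n, Node* ctl, int max_depth) {
  int depth = 0;
  while (n != ctl) {
    n = (n == NULL) ? NULL : n->idom;
    depth++;
    if (depth >= max_depth || n == NULL) {
      return false; // unknown or too far away: be conservative
    }
  }
  return true;
}

int main() {
  Node root = { NULL };
  Node mid  = { &root };
  Node leaf = { &mid };
  std::printf("%d %d\n",
              dominated_by(&leaf, &root, 100),  // 1: root dominates leaf
              dominated_by(&root, &leaf, 100)); // 0: walk hits the root first
  return 0;
}

Bounding the walk trades a missed replacement for compile-time predictability, which is why a failed search simply leaves the use untouched.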
+ while (work.size() != 0 && replace) { + Node* n = work.pop(); + if (use->outcnt() == 0) { + continue; + } + if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) { + int depth = 0; + Node *m = n; + if (!n->is_CFG()) { + n = n->in(0); + } + assert(n->is_CFG(), "should be CFG now"); + while(n != ctl) { + n = IfNode::up_one_dom(n); + depth++; + // limit search depth + if (depth >= 100 || n == NULL) { + replace = false; + break; + } + } + } else { + for (DUIterator k = n->outs(); n->has_out(k); k++) { + enqueue_use(n, n->out(k), work); + } + } + } + if (replace) { + bool is_in_table = C->initial_gvn()->hash_delete(use); + int replaced = use->replace_edge(initial, improved); + if (is_in_table) { + C->initial_gvn()->hash_find_insert(use); + } + C->record_for_igvn(use); + + assert(replaced > 0, "inconsistent"); + --j; + } + } + } +} + +void ReplacedNodes::dump(outputStream *st) const { + if (!is_empty()) { + st->print("replaced nodes: "); + for (int i = 0; i < _replaced_nodes->length(); i++) { + st->print("%d->%d", _replaced_nodes->at(i).initial()->_idx, _replaced_nodes->at(i).improved()->_idx); + if (i < _replaced_nodes->length()-1) { + st->print(","); + } + } + } +} + +// Merge two lists of replaced nodes at a point where control flow paths merge +void ReplacedNodes::merge_with(const ReplacedNodes& other) { + if (is_empty()) { + return; + } + if (other.is_empty()) { + reset(); + return; + } + int shift = 0; + int len = _replaced_nodes->length(); + for (int i = 0; i < len; i++) { + if (!other.has_node(_replaced_nodes->at(i))) { + shift++; + } else if (shift > 0) { + _replaced_nodes->at_put(i-shift, _replaced_nodes->at(i)); + } + } + if (shift > 0) { + _replaced_nodes->trunc_to(len - shift); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/opto/replacednodes.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OPTO_REPLACEDNODES_HPP +#define SHARE_VM_OPTO_REPLACEDNODES_HPP + +#include "opto/connode.hpp" + +// During parsing, when a node is "improved", +// GraphKit::replace_in_map() is called to update the current map so +// that the improved node is used from that point +// on. GraphKit::replace_in_map() doesn't operate on the callers' maps +// and so some optimization opportunities may be lost. The +// ReplacedNodes class addresses that problem. +// +// A ReplacedNodes object is a list of pairs of nodes. Every +// SafePointNode carries a ReplacedNodes object.
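merge_with() above computes an ordered in-place intersection: at a control-flow merge, only pairs recorded on both incoming paths remain valid. The same shape with std::vector standing in for GrowableArray<ReplacedNode>:

#include <algorithm>
#include <cassert>
#include <vector>

struct ReplacedPair {
  int initial;
  int improved;
  bool operator==(const ReplacedPair& o) const {
    return initial == o.initial && improved == o.improved;
  }
};

// Same shape as ReplacedNodes::merge_with(): keep only the pairs recorded
// on both incoming paths, compacting the survivors in place.
void merge_with(std::vector<ReplacedPair>& self,
                const std::vector<ReplacedPair>& other) {
  if (self.empty()) return;
  if (other.empty()) { self.clear(); return; }
  size_t shift = 0;
  for (size_t i = 0; i < self.size(); i++) {
    if (std::find(other.begin(), other.end(), self[i]) == other.end()) {
      shift++;                    // pair missing on the other path: drop it
    } else if (shift > 0) {
      self[i - shift] = self[i];  // slide survivors over the dropped slots
    }
  }
  self.resize(self.size() - shift);
}

int main() {
  std::vector<ReplacedPair> left  = { {1, 2}, {3, 4} };
  std::vector<ReplacedPair> right = { {3, 4}, {5, 6} };
  merge_with(left, right);
  assert(left.size() == 1 && left[0].initial == 3);
  return 0;
}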
Every time +// GraphKit::replace_in_map() is called, a new pair of nodes is pushed +// on the list of replaced nodes. When control flow paths merge, their +// replaced nodes are also merged. When parsing exits a method to +// return to a caller, the replaced nodes on the exit path are used to +// update the caller's map. +class ReplacedNodes VALUE_OBJ_CLASS_SPEC { + private: + class ReplacedNode VALUE_OBJ_CLASS_SPEC { + private: + Node* _initial; + Node* _improved; + public: + ReplacedNode() : _initial(NULL), _improved(NULL) {} + ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {} + Node* initial() const { return _initial; } + Node* improved() const { return _improved; } + + bool operator==(const ReplacedNode& other) { + return _initial == other._initial && _improved == other._improved; + } + }; + GrowableArray* _replaced_nodes; + + void allocate_if_necessary(); + bool has_node(const ReplacedNode& r) const; + bool has_target_node(Node* n) const; + + public: + ReplacedNodes() + : _replaced_nodes(NULL) {} + + void clone(); + void record(Node* initial, Node* improved); + void transfer_from(const ReplacedNodes& other, uint idx); + void reset(); + void apply(Node* n); + void merge_with(const ReplacedNodes& other); + bool is_empty() const; + void dump(outputStream *st) const; + void apply(Compile* C, Node* ctl); +}; + +#endif // SHARE_VM_OPTO_REPLACEDNODES_HPP --- ./hotspot/src/share/vm/opto/runtime.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/runtime.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -898,6 +898,74 @@ return TypeFunc::make(domain, range); } +/* + * void implCompress(byte[] buf, int ofs) + */ +const TypeFunc* OptoRuntime::sha_implCompress_Type() { + // create input type (domain) + int num_args = 2; + int argcnt = num_args; + const Type** fields = TypeTuple::fields(argcnt); + int argp = TypeFunc::Parms; + fields[argp++] = TypePtr::NOTNULL; // buf + fields[argp++] = TypePtr::NOTNULL; // state + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); + + // no result type needed + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = NULL; // void + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); + return TypeFunc::make(domain, range); +} + +/* + * int implCompressMultiBlock(byte[] b, int ofs, int limit) + */ +const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type() { + // create input type (domain) + int num_args = 4; + int argcnt = num_args; + const Type** fields = TypeTuple::fields(argcnt); + int argp = TypeFunc::Parms; + fields[argp++] = TypePtr::NOTNULL; // buf + fields[argp++] = TypePtr::NOTNULL; // state + fields[argp++] = TypeInt::INT; // ofs + fields[argp++] = TypeInt::INT; // limit + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); + + // returning ofs (int) + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); + return TypeFunc::make(domain, range); +} + +const TypeFunc* OptoRuntime::multiplyToLen_Type() { + // create input type (domain) + int num_args = 6; + int argcnt = num_args; + const Type** fields = TypeTuple::fields(argcnt); + int argp = TypeFunc::Parms; + fields[argp++] = TypePtr::NOTNULL; // x + fields[argp++] = TypeInt::INT; // xlen + fields[argp++] = TypePtr::NOTNULL; // y + fields[argp++] = TypeInt::INT; // ylen + 
fields[argp++] = TypePtr::NOTNULL; // z + fields[argp++] = TypeInt::INT; // zlen + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); + + // no result type needed + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = NULL; + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); + return TypeFunc::make(domain, range); +} + + + //------------- Interpreter state access for on stack replacement const TypeFunc* OptoRuntime::osr_end_Type() { // create input type (domain) --- ./hotspot/src/share/vm/opto/runtime.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/runtime.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -300,6 +300,11 @@ static const TypeFunc* aescrypt_block_Type(); static const TypeFunc* cipherBlockChaining_aescrypt_Type(); + static const TypeFunc* sha_implCompress_Type(); + static const TypeFunc* digestBase_implCompressMB_Type(); + + static const TypeFunc* multiplyToLen_Type(); + static const TypeFunc* updateBytesCRC32_Type(); // leaf on stack replacement interpreter accessor types --- ./hotspot/src/share/vm/opto/subnode.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/subnode.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1147,12 +1147,10 @@ //------------------------------dump_spec------------------------------------- // Print special per-node info -#ifndef PRODUCT void BoolTest::dump_on(outputStream *st) const { const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"}; st->print("%s", msg[_test]); } -#endif //============================================================================= uint BoolNode::hash() const { return (Node::hash() << 3)|(_test._test+1); } --- ./hotspot/src/share/vm/opto/subnode.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/subnode.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -275,9 +275,7 @@ mask commute( ) const { return mask("032147658"[_test]-'0'); } mask negate( ) const { return mask(_test^4); } bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); } -#ifndef PRODUCT void dump_on(outputStream *st) const; -#endif }; //------------------------------BoolNode--------------------------------------- --- ./hotspot/src/share/vm/opto/superword.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/superword.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1374,6 +1374,20 @@ if (n->is_Load()) { Node* ctl = n->in(MemNode::Control); Node* mem = first->in(MemNode::Memory); + SWPointer p1(n->as_Mem(), this); + // Identify the memory dependency for the new loadVector node by + // walking up through memory chain. + // This is done to give flexibility to the new loadVector node so that + // it can move above independent storeVector nodes. 
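A sketch of the walk this hunk performs, with simplified stand-ins for the memory chain and for SWPointer (here disjointness is a plain same-base range check; the real code compares SWPointer forms and also stops on non-comparable addresses):

#include <cstddef>

// Simplified stand-ins for MemNode/SWPointer: an access is a (base,
// offset, size) triple, and mem_in is the next memory state up the chain.
struct MemOp {
  bool        is_store_vector;
  const void* base;
  int         offset;
  int         size;
  MemOp*      mem_in;
};

// Provably disjoint = same base, non-overlapping byte ranges (a stand-in
// for the SWPointer::cmp() query used by the patch).
static bool provably_disjoint(const MemOp& a, const MemOp& b) {
  return a.base == b.base &&
         (a.offset + a.size <= b.offset || b.offset + b.size <= a.offset);
}

// Walk the memory chain upwards past vector stores that cannot alias the
// new vector load, stopping at the first potentially dependent one.
MemOp* memory_input_for_load(const MemOp& load, MemOp* mem) {
  while (mem != NULL && mem->is_store_vector && provably_disjoint(load, *mem)) {
    mem = mem->mem_in;
  }
  return mem;
}

int main() {
  char arr[64];
  MemOp store_hi = { true, arr, 32, 16, NULL };   // StoreVector arr[32..47]
  MemOp load_lo  = { false, arr, 0, 16, NULL };   // LoadVector  arr[0..15]
  // The load's memory input can skip the disjoint store.
  return memory_input_for_load(load_lo, &store_hi) == NULL ? 0 : 1;
}

Giving the load an older memory state is what lets the scheduler move the new LoadVector above independent StoreVector nodes.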
+ while (mem->is_StoreVector()) { + SWPointer p2(mem->as_Mem(), this); + int cmp = p1.cmp(p2); + if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) { + mem = mem->in(MemNode::Memory); + } else { + break; // dependent memory + } + } Node* adr = low_adr->in(MemNode::Address); const TypePtr* atyp = n->adr_type(); vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n)); --- ./hotspot/src/share/vm/opto/type.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/opto/type.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -265,7 +265,7 @@ // locking. Arena* save = current->type_arena(); - Arena* shared_type_arena = new (mtCompiler)Arena(); + Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler); current->set_type_arena(shared_type_arena); _shared_type_dict = --- ./hotspot/src/share/vm/precompiled/precompiled.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/precompiled/precompiled.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -193,11 +193,13 @@ # include "runtime/mutexLocker.hpp" # include "runtime/objectMonitor.hpp" # include "runtime/orderAccess.hpp" +# include "runtime/orderAccess.inline.hpp" # include "runtime/os.hpp" # include "runtime/osThread.hpp" # include "runtime/perfData.hpp" # include "runtime/perfMemory.hpp" # include "runtime/prefetch.hpp" +# include "runtime/prefetch.inline.hpp" # include "runtime/reflection.hpp" # include "runtime/reflectionUtils.hpp" # include "runtime/registerMap.hpp" @@ -218,10 +220,17 @@ # include "runtime/vmThread.hpp" # include "runtime/vm_operations.hpp" # include "runtime/vm_version.hpp" +# include "services/allocationSite.hpp" # include "services/lowMemoryDetector.hpp" +# include "services/mallocTracker.hpp" +# include "services/memBaseline.hpp" # include "services/memoryPool.hpp" # include "services/memoryService.hpp" # include "services/memoryUsage.hpp" +# include "services/memReporter.hpp" +# include "services/memTracker.hpp" +# include "services/nmtCommon.hpp" +# include "services/virtualMemoryTracker.hpp" # include "utilities/accessFlags.hpp" # include "utilities/array.hpp" # include "utilities/bitMap.hpp" @@ -235,6 +244,7 @@ # include "utilities/hashtable.hpp" # include "utilities/histogram.hpp" # include "utilities/macros.hpp" +# include "utilities/nativeCallStack.hpp" # include "utilities/numberSeq.hpp" # include "utilities/ostream.hpp" # include "utilities/preserveException.hpp" --- ./hotspot/src/share/vm/prims/forte.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/forte.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,7 +32,7 @@ #include "oops/oop.inline2.hpp" #include "prims/forte.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/thread.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "runtime/vframeArray.hpp" --- ./hotspot/src/share/vm/prims/jni.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jni.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -67,11 +67,13 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include 
"runtime/thread.inline.hpp" #include "runtime/vm_operations.hpp" +#include "services/memTracker.hpp" #include "services/runtimeService.hpp" #include "trace/tracing.hpp" #include "utilities/defaultStream.hpp" @@ -291,15 +293,6 @@ "Bug in native code: jfieldID offset must address interior of object"); } -// Pick a reasonable higher bound for local capacity requested -// for EnsureLocalCapacity and PushLocalFrame. We don't want it too -// high because a test (or very unusual application) may try to allocate -// that many handles and run out of swap space. An implementation is -// permitted to allocate more handles than the ensured capacity, so this -// value is set high enough to prevent compatibility problems. -const int MAX_REASONABLE_LOCAL_CAPACITY = 4*K; - - // Wrapper to trace JNI functions #ifdef ASSERT @@ -879,7 +872,8 @@ env, capacity); #endif /* USDT2 */ //%note jni_11 - if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) { + if (capacity < 0 || + ((MaxJNILocalCapacity > 0) && (capacity > MaxJNILocalCapacity))) { #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR); #else /* USDT2 */ @@ -1038,7 +1032,8 @@ env, capacity); #endif /* USDT2 */ jint ret; - if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) { + if (capacity >= 0 && + ((MaxJNILocalCapacity <= 0) || (capacity <= MaxJNILocalCapacity))) { ret = JNI_OK; } else { ret = JNI_ERR; @@ -3588,6 +3583,7 @@ if (bad_address != NULL) { os::protect_memory(bad_address, size, os::MEM_PROT_READ, /*is_committed*/false); + MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal); } } return bad_address; @@ -5063,6 +5059,7 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/g1/heapRegionRemSet.hpp" #endif +#include "memory/guardedMemory.hpp" #include "utilities/quickSort.hpp" #include "utilities/ostream.hpp" #if INCLUDE_VM_STRUCTS @@ -5081,10 +5078,15 @@ void TestMetachunk_test(); void TestVirtualSpaceNode_test(); void TestNewSize_test(); +void TestKlass_test(); +void Test_linked_list(); +void TestChunkedList_test(); #if INCLUDE_ALL_GCS void TestOldFreeSpaceCalculation_test(); void TestG1BiasedArray_test(); +void TestBufferingOopClosure_test(); void TestCodeCacheRemSet_test(); +void FreeRegionList_test(); #endif void execute_internal_vm_tests() { @@ -5101,9 +5103,13 @@ run_unit_test(arrayOopDesc::test_max_array_length()); run_unit_test(CollectedHeap::test_is_in()); run_unit_test(QuickSort::test_quick_sort()); + run_unit_test(GuardedMemory::test_guarded_memory()); run_unit_test(AltHashing::test_alt_hash()); run_unit_test(test_loggc_filename()); run_unit_test(TestNewSize_test()); + run_unit_test(TestKlass_test()); + run_unit_test(Test_linked_list()); + run_unit_test(TestChunkedList_test()); #if INCLUDE_VM_STRUCTS run_unit_test(VMStructs::test()); #endif @@ -5111,7 +5117,11 @@ run_unit_test(TestOldFreeSpaceCalculation_test()); run_unit_test(TestG1BiasedArray_test()); run_unit_test(HeapRegionRemSet::test_prt()); + run_unit_test(TestBufferingOopClosure_test()); run_unit_test(TestCodeCacheRemSet_test()); + if (UseG1GC) { + run_unit_test(FreeRegionList_test()); + } #endif tty->print_cr("All internal VM tests passed"); } --- ./hotspot/src/share/vm/prims/jniCheck.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jniCheck.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "memory/guardedMemory.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" 
#include "oops/symbol.hpp" @@ -35,7 +36,7 @@ #include "runtime/handles.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/jfieldIDWorkaround.hpp" -#include "runtime/thread.hpp" +#include "runtime/thread.inline.hpp" #ifdef TARGET_ARCH_x86 # include "jniTypes_x86.hpp" #endif @@ -323,6 +324,74 @@ } } +/* + * Copy and wrap array elements for bounds checking. + * Remember the original elements (GuardedMemory::get_tag()) + */ +static void* check_jni_wrap_copy_array(JavaThread* thr, jarray array, + void* orig_elements) { + void* result; + IN_VM( + oop a = JNIHandles::resolve_non_null(array); + size_t len = arrayOop(a)->length() << + TypeArrayKlass::cast(a->klass())->log2_element_size(); + result = GuardedMemory::wrap_copy(orig_elements, len, orig_elements); + ) + return result; +} + +static void* check_wrapped_array(JavaThread* thr, const char* fn_name, + void* obj, void* carray, size_t* rsz) { + if (carray == NULL) { + tty->print_cr("%s: elements vector NULL" PTR_FORMAT, fn_name, p2i(obj)); + NativeReportJNIFatalError(thr, "Elements vector NULL"); + } + GuardedMemory guarded(carray); + void* orig_result = guarded.get_tag(); + if (!guarded.verify_guards()) { + tty->print_cr("ReleasePrimitiveArrayCritical: release array failed bounds " + "check, incorrect pointer returned ? array: " PTR_FORMAT " carray: " + PTR_FORMAT, p2i(obj), p2i(carray)); + guarded.print_on(tty); + NativeReportJNIFatalError(thr, "ReleasePrimitiveArrayCritical: " + "failed bounds check"); + } + if (orig_result == NULL) { + tty->print_cr("ReleasePrimitiveArrayCritical: unrecognized elements. array: " + PTR_FORMAT " carray: " PTR_FORMAT, p2i(obj), p2i(carray)); + guarded.print_on(tty); + NativeReportJNIFatalError(thr, "ReleasePrimitiveArrayCritical: " + "unrecognized elements"); + } + if (rsz != NULL) { + *rsz = guarded.get_user_size(); + } + return orig_result; +} + +static void* check_wrapped_array_release(JavaThread* thr, const char* fn_name, + void* obj, void* carray, jint mode) { + size_t sz; + void* orig_result = check_wrapped_array(thr, fn_name, obj, carray, &sz); + switch (mode) { + case 0: + memcpy(orig_result, carray, sz); + GuardedMemory::free_copy(carray); + break; + case JNI_COMMIT: + memcpy(orig_result, carray, sz); + break; + case JNI_ABORT: + GuardedMemory::free_copy(carray); + break; + default: + tty->print_cr("%s: Unrecognized mode %i releasing array " + PTR_FORMAT " elements " PTR_FORMAT, fn_name, mode, p2i(obj), p2i(carray)); + NativeReportJNIFatalError(thr, "Unrecognized array release mode"); + } + return orig_result; +} + oop jniCheck::validate_handle(JavaThread* thr, jobject obj) { if (JNIHandles::is_frame_handle(thr, obj) || JNIHandles::is_local_handle(thr, obj) || @@ -1314,7 +1383,7 @@ JNI_END // Arbitrary (but well-known) tag -const jint STRING_TAG = 0x47114711; +const void* STRING_TAG = (void*)0x47114711; JNI_ENTRY_CHECKED(const jchar *, checked_jni_GetStringChars(JNIEnv *env, @@ -1324,21 +1393,22 @@ IN_VM( checkString(thr, str); ) - jchar* newResult = NULL; + jchar* new_result = NULL; const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy); assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected"); if (result != NULL) { size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal); - *tagLocation = STRING_TAG; - newResult = (jchar*) (tagLocation + 1); - memcpy(newResult, result, len * sizeof(jchar)); + len *= sizeof(jchar); + 
new_result = (jchar*) GuardedMemory::wrap_copy(result, len, STRING_TAG); + if (new_result == NULL) { + vm_exit_out_of_memory(len, OOM_MALLOC_ERROR, "checked_jni_GetStringChars"); + } // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes // Note that the dtrace arguments for the allocated memory will not match up with this solution. FreeHeap((char*)result); } functionExit(env); - return newResult; + return new_result; JNI_END JNI_ENTRY_CHECKED(void, @@ -1354,11 +1424,23 @@ UNCHECKED()->ReleaseStringChars(env,str,chars); } else { - jint* tagLocation = ((jint*) chars) - 1; - if (*tagLocation != STRING_TAG) { - NativeReportJNIFatalError(thr, "ReleaseStringChars called on something not allocated by GetStringChars"); - } - UNCHECKED()->ReleaseStringChars(env,str,(const jchar*)tagLocation); + GuardedMemory guarded((void*)chars); + if (!guarded.verify_guards()) { + tty->print_cr("ReleaseStringChars: release chars failed bounds check. " + "string: " PTR_FORMAT " chars: " PTR_FORMAT, p2i(str), p2i(chars)); + guarded.print_on(tty); + NativeReportJNIFatalError(thr, "ReleaseStringChars: " + "release chars failed bounds check."); + } + if (guarded.get_tag() != STRING_TAG) { + tty->print_cr("ReleaseStringChars: called on something not allocated " + "by GetStringChars. string: " PTR_FORMAT " chars: " PTR_FORMAT, + p2i(str), p2i(chars)); + NativeReportJNIFatalError(thr, "ReleaseStringChars called on something " + "not allocated by GetStringChars"); + } + UNCHECKED()->ReleaseStringChars(env, str, + (const jchar*) guarded.release_for_freeing()); } functionExit(env); JNI_END @@ -1385,7 +1467,7 @@ JNI_END // Arbitrary (but well-known) tag - different than GetStringChars -const jint STRING_UTF_TAG = 0x48124812; +const void* STRING_UTF_TAG = (void*) 0x48124812; JNI_ENTRY_CHECKED(const char *, checked_jni_GetStringUTFChars(JNIEnv *env, @@ -1395,21 +1477,21 @@ IN_VM( checkString(thr, str); ) - char* newResult = NULL; + char* new_result = NULL; const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy); assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected"); if (result != NULL) { size_t len = strlen(result) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal); - *tagLocation = STRING_UTF_TAG; - newResult = (char*) (tagLocation + 1); - strcpy(newResult, result); + new_result = (char*) GuardedMemory::wrap_copy(result, len, STRING_UTF_TAG); + if (new_result == NULL) { + vm_exit_out_of_memory(len, OOM_MALLOC_ERROR, "checked_jni_GetStringUTFChars"); + } // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes // Note that the dtrace arguments for the allocated memory will not match up with this solution. FreeHeap((char*)result, mtInternal); } functionExit(env); - return newResult; + return new_result; JNI_END JNI_ENTRY_CHECKED(void, @@ -1425,11 +1507,23 @@ UNCHECKED()->ReleaseStringUTFChars(env,str,chars); } else { - jint* tagLocation = ((jint*) chars) - 1; - if (*tagLocation != STRING_UTF_TAG) { - NativeReportJNIFatalError(thr, "ReleaseStringUTFChars called on something not allocated by GetStringUTFChars"); - } - UNCHECKED()->ReleaseStringUTFChars(env,str,(const char*)tagLocation); + GuardedMemory guarded((void*)chars); + if (!guarded.verify_guards()) { + tty->print_cr("ReleaseStringUTFChars: release chars failed bounds check. 
" + "string: " PTR_FORMAT " chars: " PTR_FORMAT, p2i(str), p2i(chars)); + guarded.print_on(tty); + NativeReportJNIFatalError(thr, "ReleaseStringUTFChars: " + "release chars failed bounds check."); + } + if (guarded.get_tag() != STRING_UTF_TAG) { + tty->print_cr("ReleaseStringUTFChars: called on something not " + "allocated by GetStringUTFChars. string: " PTR_FORMAT " chars: " + PTR_FORMAT, p2i(str), p2i(chars)); + NativeReportJNIFatalError(thr, "ReleaseStringUTFChars " + "called on something not allocated by GetStringUTFChars"); + } + UNCHECKED()->ReleaseStringUTFChars(env, str, + (const char*) guarded.release_for_freeing()); } functionExit(env); JNI_END @@ -1514,6 +1608,9 @@ ElementType *result = UNCHECKED()->Get##Result##ArrayElements(env, \ array, \ isCopy); \ + if (result != NULL) { \ + result = (ElementType *) check_jni_wrap_copy_array(thr, array, result); \ + } \ functionExit(env); \ return result; \ JNI_END @@ -1538,12 +1635,10 @@ check_primitive_array_type(thr, array, ElementTag); \ ASSERT_OOPS_ALLOWED; \ typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(array)); \ - /* cannot check validity of copy, unless every request is logged by - * checking code. Implementation of this check is deferred until a - * subsequent release. - */ \ ) \ - UNCHECKED()->Release##Result##ArrayElements(env,array,elems,mode); \ + ElementType* orig_result = (ElementType *) check_wrapped_array_release( \ + thr, "checked_jni_Release"#Result"ArrayElements", array, elems, mode); \ + UNCHECKED()->Release##Result##ArrayElements(env, array, orig_result, mode); \ functionExit(env); \ JNI_END @@ -1694,6 +1789,9 @@ check_is_primitive_array(thr, array); ) void *result = UNCHECKED()->GetPrimitiveArrayCritical(env, array, isCopy); + if (result != NULL) { + result = check_jni_wrap_copy_array(thr, array, result); + } functionExit(env); return result; JNI_END @@ -1707,10 +1805,9 @@ IN_VM( check_is_primitive_array(thr, array); ) - /* The Hotspot JNI code does not use the parameters, so just check the - * array parameter as a minor sanity check - */ - UNCHECKED()->ReleasePrimitiveArrayCritical(env, array, carray, mode); + // Check the element array... 
+ void* orig_result = check_wrapped_array_release(thr, "ReleasePrimitiveArrayCritical", array, carray, mode); + UNCHECKED()->ReleasePrimitiveArrayCritical(env, array, orig_result, mode); functionExit(env); JNI_END --- ./hotspot/src/share/vm/prims/jvm.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvm.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,10 +24,15 @@ #include "precompiled.hpp" #include "classfile/classLoader.hpp" +#include "classfile/classLoaderExt.hpp" #include "classfile/javaAssertions.hpp" #include "classfile/javaClasses.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" +#if INCLUDE_CDS +#include "classfile/sharedClassUtil.hpp" +#include "classfile/systemDictionaryShared.hpp" +#endif #include "classfile/vmSymbols.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "interpreter/bytecode.hpp" @@ -51,6 +56,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/reflection.hpp" @@ -388,6 +394,14 @@ } } + const char* enableSharedLookupCache = "false"; +#if INCLUDE_CDS + if (ClassLoaderExt::is_lookup_cache_enabled()) { + enableSharedLookupCache = "true"; + } +#endif + PUTPROP(props, "sun.cds.enableSharedLookupCache", enableSharedLookupCache); + return properties; JVM_END @@ -589,13 +603,14 @@ // Make shallow object copy const int size = obj->size(); - oop new_obj = NULL; + oop new_obj_oop = NULL; if (obj->is_array()) { const int length = ((arrayOop)obj())->length(); - new_obj = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL); + new_obj_oop = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL); } else { - new_obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL); + new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL); } + // 4839641 (4840070): We must do an oop-atomic copy, because if another thread // is modifying a reference field in the clonee, a non-oop-atomic copy might // be suspended in the middle of copying the pointer and end up with parts @@ -606,24 +621,41 @@ // The same is true of StubRoutines::object_copy and the various oop_copy // variants, and of the code generated by the inline_native_clone intrinsic. assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned"); - Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj, + Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop, (size_t)align_object_size(size) / HeapWordsPerLong); // Clear the header - new_obj->init_mark(); + new_obj_oop->init_mark(); // Store check (mark entire object and let gc sort it out) BarrierSet* bs = Universe::heap()->barrier_set(); assert(bs->has_write_region_opt(), "Barrier set does not have write_region"); - bs->write_region(MemRegion((HeapWord*)new_obj, size)); + bs->write_region(MemRegion((HeapWord*)new_obj_oop, size)); + + Handle new_obj(THREAD, new_obj_oop); + // Special handling for MemberNames. Since they contain Method* metadata, they + // must be registered so that RedefineClasses can fix metadata contained in them. + if (java_lang_invoke_MemberName::is_instance(new_obj()) && + java_lang_invoke_MemberName::is_method(new_obj())) { + Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj()); + // MemberName may be unresolved, so doesn't need registration until resolved. 
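The reason JVM_Clone now keeps new_obj in a Handle before the MemberName bookkeeping is that add_member_name() can safepoint, and a safepoint may move the freshly allocated clone. A toy illustration of the indirection a Handle buys (toy types only, not HotSpot's Handle/oop machinery):

#include <cassert>
#include <vector>

struct Obj { int field; };

// Toy moving heap: a "safepoint" relocates the object and fixes up the
// root table only; raw pointers held across it go stale.
struct ToyHeap {
  std::vector<Obj*> roots;
  Obj* current;

  ToyHeap() : current(NULL) {}
  Obj* allocate() { current = new Obj(); current->field = 42; return current; }

  void safepoint_move() {
    Obj* moved = new Obj(*current);
    delete current;
    for (size_t i = 0; i < roots.size(); i++) roots[i] = moved;
    current = moved;
  }
};

// A Handle is one level of indirection through the root table, so it
// survives the move; this is the pattern JVM_Clone now uses for new_obj.
struct Handle {
  ToyHeap* heap;
  size_t   slot;
  Handle(ToyHeap* h, Obj* o) : heap(h), slot(h->roots.size()) {
    h->roots.push_back(o);
  }
  Obj* operator()() const { return heap->roots[slot]; }
};

int main() {
  ToyHeap heap;
  Obj* raw = heap.allocate();  // like the oop returned by obj_allocate()
  Handle h(&heap, raw);        // like Handle new_obj(THREAD, new_obj_oop)
  heap.safepoint_move();       // like add_member_name(), which can safepoint
  assert(h()->field == 42);    // the handle is still valid
  (void)raw;                   // 'raw' now dangles; touching it is the old bug
  return 0;
}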
+ if (method != NULL) { + methodHandle m(THREAD, method); + // This can safepoint and redefine method, so need both new_obj and method + // in a handle, for two different reasons. new_obj can move, method can be + // deleted if nothing is using it on the stack. + m->method_holder()->add_member_name(new_obj()); + } + } // Caution: this involves a java upcall, so the clone should be // "gc-robust" by this stage. if (klass->has_finalizer()) { assert(obj->is_instance(), "should be instanceOop"); - new_obj = InstanceKlass::register_finalizer(instanceOop(new_obj), CHECK_NULL); + new_obj_oop = InstanceKlass::register_finalizer(instanceOop(new_obj()), CHECK_NULL); + new_obj = Handle(THREAD, new_obj_oop); } - return JNIHandles::make_local(env, oop(new_obj)); + return JNIHandles::make_local(env, new_obj()); JVM_END // java.lang.Compiler //////////////////////////////////////////////////// @@ -761,6 +793,36 @@ JVM_END +JVM_ENTRY(jboolean, JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname)) + JVMWrapper("JVM_KnownToNotExist"); +#if INCLUDE_CDS + return ClassLoaderExt::known_to_not_exist(env, loader, classname, CHECK_(false)); +#else + return false; +#endif +JVM_END + + +JVM_ENTRY(jobjectArray, JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader)) + JVMWrapper("JVM_GetResourceLookupCacheURLs"); +#if INCLUDE_CDS + return ClassLoaderExt::get_lookup_cache_urls(env, loader, CHECK_NULL); +#else + return NULL; +#endif +JVM_END + + +JVM_ENTRY(jintArray, JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name)) + JVMWrapper("JVM_GetResourceLookupCache"); +#if INCLUDE_CDS + return ClassLoaderExt::get_lookup_cache(env, loader, resource_name, CHECK_NULL); +#else + return NULL; +#endif +JVM_END + + // Returns a class loaded by the bootstrap class loader; or null // if not found. ClassNotFoundException is not thrown. // @@ -1032,7 +1094,15 @@ h_loader, Handle(), CHECK_NULL); - +#if INCLUDE_CDS + if (k == NULL) { + // If the class is not already loaded, try to see if it's in the shared + // archive for the current classloader (h_loader). + instanceKlassHandle ik = SystemDictionaryShared::find_or_load_shared_class( + klass_name, h_loader, CHECK_NULL); + k = ik(); + } +#endif return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror()); JVM_END @@ -4474,7 +4544,7 @@ JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t info_size)) { - memset(info, 0, sizeof(info_size)); + memset(info, 0, info_size); info->jvm_version = Abstract_VM_Version::jvm_version(); info->update_version = 0; /* 0 in HotSpot Express VM */ --- ./hotspot/src/share/vm/prims/jvm.h Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvm.h Wed Feb 04 12:14:39 2015 -0800 @@ -1548,6 +1548,31 @@ JNIEXPORT jobjectArray JNICALL JVM_GetThreadStateNames(JNIEnv* env, jint javaThreadState, jintArray values); +/* + * Returns true if the JVM's lookup cache indicates that this class is + * known to NOT exist for the given loader. + */ +JNIEXPORT jboolean JNICALL +JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname); + +/* + * Returns an array of all URLs that are stored in the JVM's lookup cache + * for the given loader. NULL if the lookup cache is unavailable. + */ +JNIEXPORT jobjectArray JNICALL +JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader); + +/* + * Returns an array of all URLs that *may* contain the resource_name for the + * given loader. 
This function returns an integer array, each element + * of which can be used to index into the array returned by + * JVM_GetResourceLookupCacheURLs of the same loader to determine the + * URLs. + */ +JNIEXPORT jintArray JNICALL +JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name); + + /* ========================================================================= * The following defines a private JVM interface that the JDK can query * for the JVM version and capabilities. sun.misc.Version defines --- ./hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -54,6 +54,7 @@ void JvmtiClassFileReconstituter::write_field_infos() { HandleMark hm(thread()); Array* fields_anno = ikh()->fields_annotations(); + Array* fields_type_anno = ikh()->fields_type_annotations(); // Compute the real number of Java fields int java_fields = ikh()->java_fields_count(); @@ -68,6 +69,7 @@ // int offset = ikh()->field_offset( index ); int generic_signature_index = fs.generic_signature_index(); AnnotationArray* anno = fields_anno == NULL ? NULL : fields_anno->at(fs.index()); + AnnotationArray* type_anno = fields_type_anno == NULL ? NULL : fields_type_anno->at(fs.index()); // JVMSpec| field_info { // JVMSpec| u2 access_flags; @@ -93,6 +95,9 @@ if (anno != NULL) { ++attr_count; // has RuntimeVisibleAnnotations attribute } + if (type_anno != NULL) { + ++attr_count; // has RuntimeVisibleTypeAnnotations attribute + } write_u2(attr_count); @@ -110,6 +115,9 @@ if (anno != NULL) { write_annotations_attribute("RuntimeVisibleAnnotations", anno); } + if (type_anno != NULL) { + write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno); + } } } @@ -550,6 +558,7 @@ AnnotationArray* anno = method->annotations(); AnnotationArray* param_anno = method->parameter_annotations(); AnnotationArray* default_anno = method->annotation_default(); + AnnotationArray* type_anno = method->type_annotations(); // skip generated default interface methods if (method->is_overpass()) { @@ -585,6 +594,9 @@ if (param_anno != NULL) { ++attr_count; // has RuntimeVisibleParameterAnnotations attribute } + if (type_anno != NULL) { + ++attr_count; // has RuntimeVisibleTypeAnnotations attribute + } write_u2(attr_count); if (const_method->code_size() > 0) { @@ -609,6 +621,9 @@ if (param_anno != NULL) { write_annotations_attribute("RuntimeVisibleParameterAnnotations", param_anno); } + if (type_anno != NULL) { + write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno); + } } // Write the class attributes portion of ClassFile structure @@ -618,6 +633,7 @@ u2 inner_classes_length = inner_classes_attribute_length(); Symbol* generic_signature = ikh()->generic_signature(); AnnotationArray* anno = ikh()->class_annotations(); + AnnotationArray* type_anno = ikh()->class_type_annotations(); int attr_count = 0; if (generic_signature != NULL) { @@ -635,6 +651,9 @@ if (anno != NULL) { ++attr_count; // has RuntimeVisibleAnnotations attribute } + if (type_anno != NULL) { + ++attr_count; // has RuntimeVisibleTypeAnnotations attribute + } if (cpool()->operands() != NULL) { ++attr_count; } @@ -656,6 +675,9 @@ if (anno != NULL) { write_annotations_attribute("RuntimeVisibleAnnotations", anno); } + if (type_anno != NULL) { + write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno); + } if (cpool()->operands() != NULL) { write_bootstrapmethod_attribute(); } --- 
./hotspot/src/share/vm/prims/jvmtiEnv.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiEnv.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderExt.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "interpreter/bytecodeStream.hpp" @@ -475,7 +476,7 @@ if (TraceClassLoading) { tty->print_cr("[Opened %s]", zip_entry->name()); } - ClassLoader::add_to_list(zip_entry); + ClassLoaderExt::append_boot_classpath(zip_entry); return JVMTI_ERROR_NONE; } else { return JVMTI_ERROR_WRONG_PHASE; --- ./hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -41,6 +41,7 @@ #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/signature.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "runtime/vframe_hp.hpp" #include "runtime/vmThread.hpp" --- ./hotspot/src/share/vm/prims/jvmtiExport.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiExport.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -47,7 +47,7 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" -#include "runtime/thread.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "services/attachListener.hpp" #include "services/serviceUtil.hpp" --- ./hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,7 +25,8 @@ #include "precompiled.hpp" #include "prims/jvmtiRawMonitor.hpp" #include "runtime/interfaceSupport.hpp" -#include "runtime/thread.hpp" +#include "runtime/orderAccess.inline.hpp" +#include "runtime/thread.inline.hpp" GrowableArray *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(1,true); --- ./hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -135,7 +135,7 @@ // Mark methods seen on stack and everywhere else so old methods are not // cleaned up if they're on the stack. - MetadataOnStackMark md_on_stack; + MetadataOnStackMark md_on_stack(true); HandleMark hm(thread); // make sure any handles created are deleted // before the stack walk again. @@ -1569,6 +1569,29 @@ return false; } + // rewrite constant pool references in the class_type_annotations: + if (!rewrite_cp_refs_in_class_type_annotations(scratch_class, THREAD)) { + // propagate failure back to caller + return false; + } + + // rewrite constant pool references in the fields_type_annotations: + if (!rewrite_cp_refs_in_fields_type_annotations(scratch_class, THREAD)) { + // propagate failure back to caller + return false; + } + + // rewrite constant pool references in the methods_type_annotations: + if (!rewrite_cp_refs_in_methods_type_annotations(scratch_class, THREAD)) { + // propagate failure back to caller + return false; + } + + // There can be type annotations in the Code part of a method_info attribute. + // These annotations are not accessible, even by reflection. + // Currently they are not even parsed by the ClassFileParser. + // If runtime access is added they will also need to be rewritten. 
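The three rewrite passes added above walk the raw RuntimeVisibleTypeAnnotations bytes with an explicit bounds check before every read. A minimal sketch of that pattern (std::vector<unsigned char> stands in for AnnotationArray; class-file values are big-endian):

#include <vector>

typedef unsigned short u2;

// Bounds-checked big-endian u2 read over raw attribute bytes, the same
// "not enough room" pattern used throughout the patch.
static bool read_u2(const std::vector<unsigned char>& buf, int& byte_i, u2& out) {
  if (byte_i + 2 > (int)buf.size()) {
    return false;  // truncated attribute: report failure to the caller
  }
  out = (u2)((buf[byte_i] << 8) | buf[byte_i + 1]);
  byte_i += 2;
  return true;
}

// Shape of the typeArray walk: read num_annotations, then process each
// type_annotation in turn, failing fast on malformed input.
bool walk_type_annotations(const std::vector<unsigned char>& buf) {
  int byte_i = 0;
  u2 num_annotations;
  if (!read_u2(buf, byte_i, num_annotations)) return false;
  for (u2 k = 0; k < num_annotations; k++) {
    // ... skip target_info and type_path, then rewrite the annotation ...
  }
  return true;
}

Failing fast and propagating false back to the caller is what lets RedefineClasses reject a malformed class file instead of reading past the end of the attribute.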
+  // rewrite source file name index:
   u2 source_file_name_idx = scratch_class->source_file_name_index();
   if (source_file_name_idx != 0) {
@@ -2239,6 +2262,588 @@
 } // end rewrite_cp_refs_in_methods_default_annotations()
 
 
+// Rewrite constant pool references in a class_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_class_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  AnnotationArray* class_type_annotations = scratch_class->class_type_annotations();
+  if (class_type_annotations == NULL || class_type_annotations->length() == 0) {
+    // no class_type_annotations so nothing to do
+    return true;
+  }
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("class_type_annotations length=%d", class_type_annotations->length()));
+
+  int byte_i = 0;  // byte index into class_type_annotations
+  return rewrite_cp_refs_in_type_annotations_typeArray(class_type_annotations,
+           byte_i, "ClassFile", THREAD);
+} // end rewrite_cp_refs_in_class_type_annotations()
+
+
+// Rewrite constant pool references in a fields_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_fields_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  Array<AnnotationArray*>* fields_type_annotations = scratch_class->fields_type_annotations();
+  if (fields_type_annotations == NULL || fields_type_annotations->length() == 0) {
+    // no fields_type_annotations so nothing to do
+    return true;
+  }
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("fields_type_annotations length=%d", fields_type_annotations->length()));
+
+  for (int i = 0; i < fields_type_annotations->length(); i++) {
+    AnnotationArray* field_type_annotations = fields_type_annotations->at(i);
+    if (field_type_annotations == NULL || field_type_annotations->length() == 0) {
+      // this field does not have any annotations so skip it
+      continue;
+    }
+
+    int byte_i = 0;  // byte index into field_type_annotations
+    if (!rewrite_cp_refs_in_type_annotations_typeArray(field_type_annotations,
+           byte_i, "field_info", THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad field_type_annotations at %d", i));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_fields_type_annotations()
+
+
+// Rewrite constant pool references in a methods_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_methods_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  for (int i = 0; i < scratch_class->methods()->length(); i++) {
+    Method* m = scratch_class->methods()->at(i);
+    AnnotationArray* method_type_annotations = m->constMethod()->type_annotations();
+
+    if (method_type_annotations == NULL || method_type_annotations->length() == 0) {
+      // this method does not have any annotations so skip it
+      continue;
+    }
+
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("methods type_annotations length=%d", method_type_annotations->length()));
+
+    int byte_i = 0;  // byte index into method_type_annotations
+    if (!rewrite_cp_refs_in_type_annotations_typeArray(method_type_annotations,
+           byte_i, "method_info", THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad method_type_annotations at %d", i));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_methods_type_annotations()
+
+
+// Rewrite constant pool references in a type_annotations
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotations_typeArray {
+//   u2              num_annotations;
+//   type_annotation annotations[num_annotations];
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotations_typeArray(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+    // not enough room for num_annotations field
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for num_annotations field"));
+    return false;
+  }
+
+  u2 num_annotations = Bytes::get_Java_u2((address)
+                         type_annotations_typeArray->adr_at(byte_i_ref));
+  byte_i_ref += 2;
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("num_type_annotations=%d", num_annotations));
+
+  int calc_num_annotations = 0;
+  for (; calc_num_annotations < num_annotations; calc_num_annotations++) {
+    if (!rewrite_cp_refs_in_type_annotation_struct(type_annotations_typeArray,
+           byte_i_ref, location_mesg, THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad type_annotation_struct at %d", calc_num_annotations));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+  assert(num_annotations == calc_num_annotations, "sanity check");
+
+  if (byte_i_ref != type_annotations_typeArray->length()) {
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("read wrong amount of bytes at end of processing "
+       "type_annotations_typeArray (%d of %d bytes were read)",
+       byte_i_ref, type_annotations_typeArray->length()));
+    return false;
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_type_annotations_typeArray()
+
+
+// Rewrite constant pool references in a type_annotation
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotation {
+//   u1 target_type;
+//   union {
+//     type_parameter_target;
+//     supertype_target;
+//     type_parameter_bound_target;
+//     empty_target;
+//     method_formal_parameter_target;
+//     throws_target;
+//     localvar_target;
+//     catch_target;
+//     offset_target;
+//     type_argument_target;
+//   } target_info;
+//   type_path target_path;
+//   annotation anno;
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotation_struct(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if (!skip_type_annotation_target(type_annotations_typeArray,
+         byte_i_ref, location_mesg, THREAD)) {
+    return false;
+  }
+
+  if (!skip_type_annotation_type_path(type_annotations_typeArray,
+         byte_i_ref, THREAD)) {
+    return false;
+  }
+
+  if (!rewrite_cp_refs_in_annotation_struct(type_annotations_typeArray,
+         byte_i_ref, THREAD)) {
+    return false;
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_type_annotation_struct()
+
+
+// Read, verify and skip over the target_type and target_info part
+// so that rewriting can continue in the later parts of the struct.
+//
+// u1 target_type;
+// union {
+//   type_parameter_target;
+//   supertype_target;
+//   type_parameter_bound_target;
+//   empty_target;
+//   method_formal_parameter_target;
+//   throws_target;
+//   localvar_target;
+//   catch_target;
+//   offset_target;
+//   type_argument_target;
+// } target_info;
+//
+bool VM_RedefineClasses::skip_type_annotation_target(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+    // not enough room for a target_type let alone the rest of a type_annotation
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for a target_type"));
+    return false;
+  }
+
+  u1 target_type = type_annotations_typeArray->at(byte_i_ref);
+  byte_i_ref += 1;
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("target_type=0x%.2x", target_type));
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("location=%s", location_mesg));
+
+  // Skip over target_info
+  switch (target_type) {
+    case 0x00:
+      // kind: type parameter declaration of generic class or interface
+      // location: ClassFile
+    case 0x01:
+      // kind: type parameter declaration of generic method or constructor
+      // location: method_info
+
+      {
+        // struct:
+        // type_parameter_target {
+        //   u1 type_parameter_index;
+        // }
+        //
+        if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a type_parameter_target"));
+          return false;
+        }
+
+        u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+        byte_i_ref += 1;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("type_parameter_target: type_parameter_index=%d",
+           type_parameter_index));
+      } break;
+
+    case 0x10:
+      // kind: type in extends clause of class or interface declaration
+      //       (including the direct superclass of an anonymous class declaration),
+      //       or in implements clause of interface declaration
+      // location: ClassFile
+
+      {
+        // struct:
+        // supertype_target {
+        //   u2 supertype_index;
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a supertype_target"));
+          return false;
+        }
+
+        u2 supertype_index = Bytes::get_Java_u2((address)
+                               type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("supertype_target: supertype_index=%d", supertype_index));
+      } break;
+
+    case 0x11:
+      // kind: type in bound of type parameter declaration of generic class or interface
+      // location: ClassFile
+    case 0x12:
+      // kind: type in bound of type parameter declaration of generic method or constructor
+      // location: method_info
+
+      {
+        // struct:
+        // type_parameter_bound_target {
+        //   u1 type_parameter_index;
+        //   u1 bound_index;
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a type_parameter_bound_target"));
+          return false;
+        }
+
+        u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+        byte_i_ref += 1;
+        u1 bound_index = type_annotations_typeArray->at(byte_i_ref);
+        byte_i_ref += 1;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("type_parameter_bound_target: type_parameter_index=%d, bound_index=%d",
+           type_parameter_index, bound_index));
+      } break;
+
+    case 0x13:
+      // kind: type in field declaration
+      // location: field_info
+    case 0x14:
+      // kind: return type of method, or type of newly constructed object
+      // location: method_info
+    case 0x15:
+      // kind: receiver type of method or constructor
+      // location: method_info
+
+      {
+        // struct:
+        // empty_target {
+        // }
+        //
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("empty_target"));
+      } break;
+
+    case 0x16:
+      // kind: type in formal parameter declaration of method, constructor, or lambda expression
+      // location: method_info
+
+      {
+        // struct:
+        // formal_parameter_target {
+        //   u1 formal_parameter_index;
+        // }
+        //
+        if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a formal_parameter_target"));
+          return false;
+        }
+
+        u1 formal_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+        byte_i_ref += 1;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("formal_parameter_target: formal_parameter_index=%d",
+           formal_parameter_index));
+      } break;
+
+    case 0x17:
+      // kind: type in throws clause of method or constructor
+      // location: method_info
+
+      {
+        // struct:
+        // throws_target {
+        //   u2 throws_type_index
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a throws_target"));
+          return false;
+        }
+
+        u2 throws_type_index = Bytes::get_Java_u2((address)
+                                 type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("throws_target: throws_type_index=%d", throws_type_index));
+      } break;
+
+    case 0x40:
+      // kind: type in local variable declaration
+      // location: Code
+    case 0x41:
+      // kind: type in resource variable declaration
+      // location: Code
+
+      {
+        // struct:
+        // localvar_target {
+        //   u2 table_length;
+        //   struct {
+        //     u2 start_pc;
+        //     u2 length;
+        //     u2 index;
+        //   } table[table_length];
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          // not enough room for a table_length let alone the rest of a localvar_target
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a localvar_target table_length"));
+          return false;
+        }
+
+        u2 table_length = Bytes::get_Java_u2((address)
+                            type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("localvar_target: table_length=%d", table_length));
+
+        int table_struct_size = 2 + 2 + 2;  // 3 u2 variables per table entry
+        int table_size = table_length * table_struct_size;
+
+        if ((byte_i_ref + table_size) > type_annotations_typeArray->length()) {
+          // not enough room for a table
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a table array of length %d", table_length));
+          return false;
+        }
+
+        // Skip over table
+        byte_i_ref += table_size;
+      } break;
+
+    case 0x42:
+      // kind: type in exception parameter declaration
+      // location: Code
+
+      {
+        // struct:
+        // catch_target {
+        //   u2 exception_table_index;
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a catch_target"));
+          return false;
+        }
+
+        u2 exception_table_index = Bytes::get_Java_u2((address)
+                                     type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("catch_target: exception_table_index=%d", exception_table_index));
+      } break;
+
+    case 0x43:
+      // kind: type in instanceof expression
+      // location: Code
+    case 0x44:
+      // kind: type in new expression
+      // location: Code
+    case 0x45:
+      // kind: type in method reference expression using ::new
+      // location: Code
+    case 0x46:
+      // kind: type in method reference expression using ::Identifier
+      // location: Code
+
+      {
+        // struct:
+        // offset_target {
+        //   u2 offset;
+        // }
+        //
+        if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a offset_target"));
+          return false;
+        }
+
+        u2 offset = Bytes::get_Java_u2((address)
+                      type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("offset_target: offset=%d", offset));
+      } break;
+
+    case 0x47:
+      // kind: type in cast expression
+      // location: Code
+    case 0x48:
+      // kind: type argument for generic constructor in new expression or
+      //       explicit constructor invocation statement
+      // location: Code
+    case 0x49:
+      // kind: type argument for generic method in method invocation expression
+      // location: Code
+    case 0x4A:
+      // kind: type argument for generic constructor in method reference expression using ::new
+      // location: Code
+    case 0x4B:
+      // kind: type argument for generic method in method reference expression using ::Identifier
+      // location: Code
+
+      {
+        // struct:
+        // type_argument_target {
+        //   u2 offset;
+        //   u1 type_argument_index;
+        // }
+        //
+        if ((byte_i_ref + 3) > type_annotations_typeArray->length()) {
+          RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+            ("length() is too small for a type_argument_target"));
+          return false;
+        }
+
+        u2 offset = Bytes::get_Java_u2((address)
+                      type_annotations_typeArray->adr_at(byte_i_ref));
+        byte_i_ref += 2;
+        u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+        byte_i_ref += 1;
+
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("type_argument_target: offset=%d, type_argument_index=%d",
+           offset, type_argument_index));
+      } break;
+
+    default:
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("unknown target_type"));
+#ifdef ASSERT
+      ShouldNotReachHere();
+#endif
+      return false;
+  }
+
+  return true;
+} // end skip_type_annotation_target()
+
+
+// Read, verify and skip over the type_path part so that rewriting
+// can continue in the later parts of the struct.
+//
+// type_path {
+//   u1 path_length;
+//   {
+//     u1 type_path_kind;
+//     u1 type_argument_index;
+//   } path[path_length];
+// }
+//
+bool VM_RedefineClasses::skip_type_annotation_type_path(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS) {
+
+  if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+    // not enough room for a path_length let alone the rest of the type_path
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for a type_path"));
+    return false;
+  }
+
+  u1 path_length = type_annotations_typeArray->at(byte_i_ref);
+  byte_i_ref += 1;
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("type_path: path_length=%d", path_length));
+
+  int calc_path_length = 0;
+  for (; calc_path_length < path_length; calc_path_length++) {
+    if ((byte_i_ref + 1 + 1) > type_annotations_typeArray->length()) {
+      // not enough room for a path
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("length() is too small for path entry %d of %d",
+         calc_path_length, path_length));
+      return false;
+    }
+
+    u1 type_path_kind = type_annotations_typeArray->at(byte_i_ref);
+    byte_i_ref += 1;
+    u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+    byte_i_ref += 1;
+
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("type_path: path[%d]: type_path_kind=%d, type_argument_index=%d",
+       calc_path_length, type_path_kind, type_argument_index));
+
+    if (type_path_kind > 3 || (type_path_kind != 3 && type_argument_index != 0)) {
+      // inconsistent combination of type_path_kind and type_argument_index
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("inconsistent type_path values"));
+      return false;
+    }
+  }
+  assert(path_length == calc_path_length, "sanity check");
+
+  return true;
+} // end skip_type_annotation_type_path()
+
+
 // Rewrite constant pool references in the method's stackmap table.
 // These "structures" are adapted from the StackMapTable_attribute that
 // is described in section 4.8.4 of the 6.0 version of the VM spec
@@ -3223,23 +3828,6 @@
 
 void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
           instanceKlassHandle scratch_class) {
-  // Since there is currently no rewriting of type annotations indexes
-  // into the CP, we null out type annotations on scratch_class before
-  // we swap annotations with the_class rather than facing the
-  // possibility of shipping annotations with broken indexes to
-  // Java-land.
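
As an aside for readers tracing the byte-level walk above: the type_path layout can be exercised with a small standalone sketch. The following is illustrative C++ only (plain stdio and hypothetical sample bytes, not HotSpot types); it mirrors the bounds and consistency checks of skip_type_annotation_type_path().

    #include <cstdint>
    #include <cstdio>

    // Walk a type_path blob: u1 path_length, then path_length pairs of
    // { u1 type_path_kind; u1 type_argument_index; } (JVMS 4.7.20).
    static bool skip_type_path(const uint8_t* buf, int len, int& i) {
      if (i + 1 > len) return false;              // no room for path_length
      uint8_t path_length = buf[i++];
      for (int p = 0; p < path_length; p++) {
        if (i + 2 > len) return false;            // no room for this entry
        uint8_t kind = buf[i++];                  // 0..2: deeper/nested/bound, 3: type argument
        uint8_t arg  = buf[i++];                  // must be 0 unless kind == 3
        if (kind > 3 || (kind != 3 && arg != 0)) return false;
        std::printf("path[%d]: kind=%u, arg=%u\n", p, kind, arg);
      }
      return true;
    }

    int main() {
      const uint8_t blob[] = { 2, 3, 1, 0, 0 };   // path_length=2: {kind=3,arg=1}, {kind=0,arg=0}
      int i = 0;
      bool ok = skip_type_path(blob, (int)sizeof blob, i);
      std::printf("ok=%d, consumed=%d of %d bytes\n", ok, i, (int)sizeof blob);
      return 0;
    }

On this input the walker consumes all five bytes and succeeds; truncating the blob, or giving a non-zero type_argument_index to an entry whose kind is not 3, makes it fail, which is exactly the failure the rewriter propagates back to its caller.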
-  ClassLoaderData* loader_data = scratch_class->class_loader_data();
-  AnnotationArray* new_class_type_annotations = scratch_class->class_type_annotations();
-  if (new_class_type_annotations != NULL) {
-    MetadataFactory::free_array<u1>(loader_data, new_class_type_annotations);
-    scratch_class->annotations()->set_class_type_annotations(NULL);
-  }
-  Array<AnnotationArray*>* new_field_type_annotations = scratch_class->fields_type_annotations();
-  if (new_field_type_annotations != NULL) {
-    Annotations::free_contents(loader_data, new_field_type_annotations);
-    scratch_class->annotations()->set_fields_type_annotations(NULL);
-  }
-
   // Swap annotation fields values
   Annotations* old_annotations = the_class->annotations();
   the_class->set_annotations(scratch_class->annotations());
--- ./hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -457,6 +457,17 @@
           instanceKlassHandle scratch_class, TRAPS);
   bool rewrite_cp_refs_in_element_value(
           AnnotationArray* class_annotations, int &byte_i_ref, TRAPS);
+  bool rewrite_cp_refs_in_type_annotations_typeArray(
+          AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+          const char * location_mesg, TRAPS);
+  bool rewrite_cp_refs_in_type_annotation_struct(
+          AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+          const char * location_mesg, TRAPS);
+  bool skip_type_annotation_target(
+          AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+          const char * location_mesg, TRAPS);
+  bool skip_type_annotation_type_path(
+          AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS);
   bool rewrite_cp_refs_in_fields_annotations(
           instanceKlassHandle scratch_class, TRAPS);
   void rewrite_cp_refs_in_method(methodHandle method,
@@ -468,6 +479,12 @@
           instanceKlassHandle scratch_class, TRAPS);
   bool rewrite_cp_refs_in_methods_parameter_annotations(
           instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_class_type_annotations(
+          instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_fields_type_annotations(
+          instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_methods_type_annotations(
+          instanceKlassHandle scratch_class, TRAPS);
   void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
   void rewrite_cp_refs_in_verification_type_info(
           address& stackmap_addr_ref, address stackmap_end, u2 frame_i,
--- ./hotspot/src/share/vm/prims/jvmtiTagMap.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/jvmtiTagMap.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -3017,7 +3017,7 @@
 
   // If there are any non-perm roots in the code cache, visit them.
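
The declarations just added compose simply: three per-location entry points over one shared walker, with failure propagated up so a redefinition can be rejected before broken indexes reach Java. A minimal standalone sketch of that shape (illustrative types only, not the VM's):

    #include <cstdio>
    #include <vector>

    // Stand-in for an annotation byte array; empty means nothing to rewrite.
    using Annotations = std::vector<unsigned char>;

    static bool walk(const Annotations& a, const char* location) {
      if (a.empty()) return true;               // nothing to rewrite
      std::printf("rewriting %zu bytes (%s)\n", a.size(), location);
      return true;                              // the real walker can return false
    }

    static bool rewrite_all(const Annotations& cls,
                            const std::vector<Annotations>& fields,
                            const std::vector<Annotations>& methods) {
      if (!walk(cls, "ClassFile")) return false;
      for (const Annotations& f : fields)
        if (!walk(f, "field_info")) return false;
      for (const Annotations& m : methods)
        if (!walk(m, "method_info")) return false;
      return true;
    }

    int main() {
      std::vector<Annotations> fields{{1, 2}}, methods;
      std::printf("ok=%d\n", rewrite_all({}, fields, methods));
      return 0;
    }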
   blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
-  CodeBlobToOopClosure look_in_blobs(&blk, false);
+  CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
   CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
 
   return true;
--- ./hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -27,6 +27,7 @@
 
 #include "prims/jvmtiEnvThreadState.hpp"
 #include "prims/jvmtiThreadState.hpp"
+#include "runtime/thread.inline.hpp"
 
 // JvmtiEnvThreadStateIterator implementation
--- ./hotspot/src/share/vm/prims/methodHandles.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/methodHandles.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -29,7 +29,6 @@
 #include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/javaCalls.hpp"
@@ -271,9 +270,12 @@
   // This is done eagerly, since it is readily available without
   // constructing any new objects.
   // TO DO: maybe intern mname_oop
-  m->method_holder()->add_member_name(m->method_idnum(), mname);
-
-  return mname();
+  if (m->method_holder()->add_member_name(mname)) {
+    return mname();
+  } else {
+    // Redefinition caused this to fail. Return NULL (and an exception?)
+    return NULL;
+  }
 }
 
 oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
@@ -946,63 +948,27 @@
   }
 }
 
-void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
+void MemberNameTable::add_member_name(jweak mem_name_wref) {
   assert_locked_or_safepoint(MemberNameTable_lock);
-  this->at_put_grow(index, mem_name_wref);
-}
-
-// Return a member name oop or NULL.
-oop MemberNameTable::get_member_name(int index) {
-  assert_locked_or_safepoint(MemberNameTable_lock);
-
-  jweak ref = this->at(index);
-  oop mem_name = JNIHandles::resolve(ref);
-  return mem_name;
+  this->push(mem_name_wref);
 }
 
 #if INCLUDE_JVMTI
-oop MemberNameTable::find_member_name_by_method(Method* old_method) {
-  assert_locked_or_safepoint(MemberNameTable_lock);
-  oop found = NULL;
-  int len = this->length();
-
-  for (int idx = 0; idx < len; idx++) {
-    oop mem_name = JNIHandles::resolve(this->at(idx));
-    if (mem_name == NULL) {
-      continue;
-    }
-    Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name);
-    if (method == old_method) {
-      found = mem_name;
-      break;
-    }
-  }
-  return found;
-}
-
-// It is called at safepoint only
+// It is called at safepoint only for RedefineClasses
 void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods,
                                             int methods_length, bool *trace_name_printed) {
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
-  // search the MemberNameTable for uses of either obsolete or EMCP methods
+  // For each redefined method
   for (int j = 0; j < methods_length; j++) {
     Method* old_method = old_methods[j];
     Method* new_method = new_methods[j];
-    oop mem_name = find_member_name_by_method(old_method);
-    if (mem_name != NULL) {
-      java_lang_invoke_MemberName::adjust_vmtarget(mem_name, new_method);
-      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
-        if (!(*trace_name_printed)) {
-          // RC_TRACE_MESG macro has an embedded ResourceMark
-          RC_TRACE_MESG(("adjust: name=%s",
-                         old_method->method_holder()->external_name()));
-          *trace_name_printed = true;
-        }
-        // RC_TRACE macro has an embedded ResourceMark
-        RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
-                              new_method->name()->as_C_string(),
-                              new_method->signature()->as_C_string()));
+    // search the MemberNameTable for uses of either obsolete or EMCP methods
+    for (int idx = 0; idx < length(); idx++) {
+      oop mem_name = JNIHandles::resolve(this->at(idx));
+      if (mem_name != NULL) {
+        java_lang_invoke_MemberName::adjust_vmtarget(mem_name, old_method, new_method,
+                                                     trace_name_printed);
       }
     }
   }
--- ./hotspot/src/share/vm/prims/methodHandles.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/methodHandles.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -236,18 +236,14 @@
 public:
   MemberNameTable(int methods_cnt);
   ~MemberNameTable();
-  void add_member_name(int index, jweak mem_name_ref);
-  oop get_member_name(int index);
+  void add_member_name(jweak mem_name_ref);
 
 #if INCLUDE_JVMTI
- public:
   // RedefineClasses() API support:
   // If a MemberName refers to old_method then update it
   // to refer to new_method.
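
A note on the new adjust_method_entries() loop above: the table holds jweak handles, so an entry can go NULL at any GC and must be resolved and null-checked on every pass. A toy standalone model of the scan, with plain pointers standing in for weak JNI handles and for Method* (illustrative only, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct MemberName { const void* vmtarget; };

    static void adjust_entries(std::vector<MemberName*>& table,
                               const void* old_method, const void* new_method) {
      for (MemberName* mn : table) {     // analogue of JNIHandles::resolve(at(idx))
        if (mn == nullptr) continue;     // collected entry: skip, don't crash
        if (mn->vmtarget == old_method) {
          mn->vmtarget = new_method;     // analogue of adjust_vmtarget()
        }
      }
    }

    int main() {
      int oldm = 0, newm = 0;            // addresses stand in for Method*
      MemberName a{&oldm}, b{&newm};
      std::vector<MemberName*> table{ &a, nullptr, &b };
      adjust_entries(table, &oldm, &newm);
      std::printf("a now points at new method: %d\n", a.vmtarget == &newm);
      return 0;
    }

The NULL slot in the middle of the table is the interesting case: the old indexed lookup could assume a stable slot per method, while the new push-based table has to tolerate collected entries anywhere.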
   void adjust_method_entries(Method** old_methods, Method** new_methods,
                              int methods_length, bool *trace_name_printed);
- private:
-  oop find_member_name_by_method(Method* old_method);
 #endif // INCLUDE_JVMTI
 };
--- ./hotspot/src/share/vm/prims/unsafe.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/unsafe.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -33,6 +33,8 @@
 #include "prims/jvm.h"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
@@ -951,6 +953,14 @@
 } UNSAFE_END
 
+static jobject get_class_loader(JNIEnv* env, jclass cls) {
+  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
+    return NULL;
+  }
+  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
+  oop loader = k->class_loader();
+  return JNIHandles::make_local(env, loader);
+}
 
 UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
   UnsafeWrapper("Unsafe_DefineClass");
@@ -959,7 +969,7 @@
     int depthFromDefineClass0 = 1;
     jclass  caller = JVM_GetCallerClass(env, depthFromDefineClass0);
-    jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
+    jobject loader = (caller == NULL) ? NULL : get_class_loader(env, caller);
     jobject pd     = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
 
     return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
--- ./hotspot/src/share/vm/prims/whitebox.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/whitebox.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
@@ -36,21 +37,26 @@
 #include "runtime/arguments.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
+#include "utilities/array.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/exceptions.hpp"
 
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
+#include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/nativeCallStack.hpp"
 #endif // INCLUDE_NMT
 
 #include "compiler/compileBroker.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
 #include "runtime/compilationPolicy.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -99,6 +105,51 @@
   return closure.found();
 WB_END
 
+WB_ENTRY(jboolean, WB_ClassKnownToNotExist(JNIEnv* env, jobject o, jobject loader, jstring name))
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+  const char* class_name = env->GetStringUTFChars(name, NULL);
+  jboolean result = JVM_KnownToNotExist(env, loader, class_name);
+  env->ReleaseStringUTFChars(name, class_name);
+  return result;
+WB_END
+
+WB_ENTRY(jobjectArray, WB_GetLookupCacheURLs(JNIEnv* env, jobject o, jobject loader))
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+  return JVM_GetResourceLookupCacheURLs(env, loader);
+WB_END
+
+WB_ENTRY(jintArray, WB_GetLookupCacheMatches(JNIEnv* env, jobject o, jobject loader, jstring name))
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+  const char* resource_name = env->GetStringUTFChars(name, NULL);
+  jintArray result = JVM_GetResourceLookupCache(env, loader, resource_name);
+
+  env->ReleaseStringUTFChars(name, resource_name);
+  return result;
+WB_END
+
+WB_ENTRY(void, WB_AddToBootstrapClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+  ResourceMark rm;
+  const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+  JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+  jvmtiError err = jvmti_env->AddToBootstrapClassLoaderSearch(seg);
+  assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+WB_ENTRY(void, WB_AddToSystemClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+  ResourceMark rm;
+  const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+  JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+  jvmtiError err = jvmti_env->AddToSystemClassLoaderSearch(seg);
+  assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+
 WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
   return (jlong)Arguments::max_heap_for_compressed_oops();
 }
@@ -219,6 +270,30 @@
                                        (size_t) magnitude, (size_t) iterations);
 WB_END
 
+WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    G1CollectedHeap* g1 = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1->heap_region_containing(p);
+    if (hr == NULL) {
+      return false;
+    }
+    return !(hr->is_young());
+  } else if (UseParallelGC) {
+    ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
+    return !psh->is_in_young(p);
+  }
+#endif // INCLUDE_ALL_GCS
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  return !gch->is_in_young(p);
+WB_END
+
+WB_ENTRY(jlong, WB_GetObjectSize(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+  return p->size() * HeapWordSize;
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -229,7 +304,7 @@
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  size_t nr = g1->free_regions();
+  size_t nr = g1->num_free_regions();
   return (jlong)nr;
 WB_END
 
@@ -249,12 +324,16 @@
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
+  addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
+  return addr;
+WB_END
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
-    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
-  }
-
-  return addr;
+// Alloc memory with pseudo call stack. The test can create pseudo malloc
+// allocation site to stress the malloc tracking.
+WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
+  address pc = (address)(size_t)pseudo_stack;
+  NativeCallStack stack(&pc, 1);
+  return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
 WB_END
 
 // Free the memory allocated by NMTAllocTest
@@ -265,10 +344,8 @@
 WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
     addr = (jlong)(uintptr_t)os::reserve_memory(size);
     MemTracker::record_virtual_memory_type((address)addr, mtTest);
-  }
 
   return addr;
 WB_END
@@ -287,20 +364,46 @@
   os::release_memory((char *)(uintptr_t)addr, size);
 WB_END
 
-// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
-WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
-
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    return false;
-  }
-
-  return MemTracker::wbtest_wait_for_data_merge();
+WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
+  return MemTracker::tracking_level() == NMT_detail;
 WB_END
 
-WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
-  return MemTracker::tracking_level() == MemTracker::NMT_detail;
+WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
+  // Test that we can downgrade NMT levels but not upgrade them.
+  if (MemTracker::tracking_level() == NMT_off) {
+    MemTracker::transition_to(NMT_off);
+    return MemTracker::tracking_level() == NMT_off;
+  } else {
+    assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
+    MemTracker::transition_to(NMT_summary);
+    assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");
+
+    // Can't go to detail once NMT is set to summary.
+    MemTracker::transition_to(NMT_detail);
+    assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");
+
+    // Shutdown sets tracking level to minimal.
+    MemTracker::shutdown();
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");
+
+    // Once the tracking level is minimal, we cannot increase to summary.
+    // The code ignores this request instead of asserting because if the malloc site
+    // table overflows in another thread, it tries to change the code to summary.
+    MemTracker::transition_to(NMT_summary);
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
+
+    // Really can never go up to detail, verify that the code would never do this.
+    MemTracker::transition_to(NMT_detail);
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
+    return MemTracker::tracking_level() == NMT_minimal;
+  }
 WB_END
 
+WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
+  int hash_size = MallocSiteTable::hash_buckets();
+  assert(hash_size > 0, "NMT hash_size should be > 0");
+  return (jint)hash_size;
+WB_END
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -322,19 +425,10 @@
   CHECK_JNI_EXCEPTION_(env, result);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  nmethod* code;
   if (is_osr) {
-    int bci = InvocationEntryBci;
-    while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
-      code->mark_for_deoptimization();
-      ++result;
-      bci = code->osr_entry_bci() + 1;
-    }
-  } else {
-    code = mh->code();
-  }
-  if (code != NULL) {
-    code->mark_for_deoptimization();
+    result += mh->mark_osr_nmethods();
+  } else if (mh->code() != NULL) {
+    mh->code()->mark_for_deoptimization();
     ++result;
   }
   result += CodeCache::mark_for_deoptimization(mh());
@@ -495,12 +589,165 @@
 
 #ifdef TIERED
   mcs->set_rate(0.0F);
-  mh->set_prev_event_count(0, THREAD);
-  mh->set_prev_time(0, THREAD);
+  mh->set_prev_event_count(0);
+  mh->set_prev_time(0);
 #endif
 }
 WB_END
 
+template <typename T>
+static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
+  if (name == NULL) {
+    return false;
+  }
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* flag_name = env->GetStringUTFChars(name, NULL);
+  bool result = (*TAt)(flag_name, value);
+  env->ReleaseStringUTFChars(name, flag_name);
+  return result;
+}
+
+template <typename T>
+static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAtPut)(const char*, T*, Flag::Flags)) {
+  if (name == NULL) {
+    return false;
+  }
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* flag_name = env->GetStringUTFChars(name, NULL);
+  bool result = (*TAtPut)(flag_name, value, Flag::INTERNAL);
+  env->ReleaseStringUTFChars(name, flag_name);
+  return result;
+}
+
+template <typename T>
+static jobject box(JavaThread* thread, JNIEnv* env, Symbol* name, Symbol* sig, T value) {
+  ResourceMark rm(thread);
+  jclass clazz = env->FindClass(name->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jmethodID methodID = env->GetStaticMethodID(clazz,
+        vmSymbols::valueOf_name()->as_C_string(),
+        sig->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jobject result = env->CallStaticObjectMethod(clazz, methodID, value);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  return result;
+}
+
+static jobject booleanBox(JavaThread* thread, JNIEnv* env, jboolean value) {
+  return box(thread, env, vmSymbols::java_lang_Boolean(), vmSymbols::Boolean_valueOf_signature(), value);
+}
+static jobject integerBox(JavaThread* thread, JNIEnv* env, jint value) {
+  return box(thread, env, vmSymbols::java_lang_Integer(), vmSymbols::Integer_valueOf_signature(), value);
+}
+static jobject longBox(JavaThread* thread, JNIEnv* env, jlong value) {
+  return box(thread, env, vmSymbols::java_lang_Long(), vmSymbols::Long_valueOf_signature(), value);
+}
+/* static jobject floatBox(JavaThread* thread, JNIEnv* env, jfloat value) {
+  return box(thread, env, vmSymbols::java_lang_Float(), vmSymbols::Float_valueOf_signature(), value);
+}*/
+static jobject doubleBox(JavaThread* thread, JNIEnv* env, jdouble value) {
+  return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
+}
+
+WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
+  bool result;
+  if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return booleanBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
+  intx result;
+  if (GetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetUintxVMFlag(JNIEnv* env, jobject o, jstring name))
+  uintx result;
+  if (GetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
+  uint64_t result;
+  if (GetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
+  double result;
+  if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return doubleBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jstring, WB_GetStringVMFlag(JNIEnv* env, jobject o, jstring name))
+  ccstr ccstrResult;
+  if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    jstring result = env->NewStringUTF(ccstrResult);
+    CHECK_JNI_EXCEPTION_(env, NULL);
+    return result;
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(void, WB_SetBooleanVMFlag(JNIEnv* env, jobject o, jstring name, jboolean value))
+  bool result = value == JNI_TRUE ? true : false;
+  SetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetIntxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  intx result = value;
+  SetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetUintxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  uintx result = value;
+  SetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetUint64VMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  uint64_t result = value;
+  SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
  double result = value;
+  SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring value))
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* ccstrValue = (value == NULL) ? NULL : env->GetStringUTFChars(value, NULL);
+  ccstr ccstrResult = ccstrValue;
+  bool needFree;
+  {
+    ThreadInVMfromNative ttvfn(thread); // back to VM
+    needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAtPut);
+  }
+  if (value != NULL) {
+    env->ReleaseStringUTFChars(value, ccstrValue);
+  }
+  if (needFree) {
+    FREE_C_HEAP_ARRAY(char, ccstrResult, mtInternal);
+  }
+WB_END
+
+
 WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
   ResourceMark rm(THREAD);
   int len;
@@ -511,8 +758,17 @@
 
 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
   Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_last_ditch_collection);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    // Needs to be cleared explicitly for G1
+    Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(false);
+  }
+#endif // INCLUDE_ALL_GCS
 WB_END
 
+WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
+  Universe::heap()->collect(GCCause::_wb_young_gc);
+WB_END
 
 WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
   // static+volatile in order to force the read to happen
@@ -559,11 +815,7 @@
     return result;
   }
 
-  clazz = env->FindClass(vmSymbols::java_lang_Integer()->as_C_string());
-  CHECK_JNI_EXCEPTION_(env, NULL);
-  jmethodID constructor = env->GetMethodID(clazz, vmSymbols::object_initializer_name()->as_C_string(), vmSymbols::int_void_signature()->as_C_string());
-  CHECK_JNI_EXCEPTION_(env, NULL);
-  jobject obj = env->NewObject(clazz, constructor, code->comp_level());
+  jobject obj = integerBox(thread, env, code->comp_level());
   CHECK_JNI_EXCEPTION_(env, NULL);
   env->SetObjectArrayElement(result, 0, obj);
 
@@ -576,6 +828,62 @@
 WB_END
 
+int WhiteBox::array_bytes_to_length(size_t bytes) {
+  return Array<u1>::bytes_to_length(bytes);
+}
+
+WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
+  if (size < 0) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
+  }
+
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
+
+  return (jlong)(uintptr_t)metadata;
+WB_END
+
+WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
+WB_END
+
+WB_ENTRY(jlong, WB_IncMetaspaceCapacityUntilGC(JNIEnv* env, jobject wb, jlong inc))
+  if (inc < 0) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_IncMetaspaceCapacityUntilGC: inc is negative: " JLONG_FORMAT, inc));
+  }
+
+  jlong max_size_t = (jlong) ((size_t) -1);
+  if (inc > max_size_t) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_IncMetaspaceCapacityUntilGC: inc does not fit in size_t: " JLONG_FORMAT, inc));
+  }
+
+  size_t new_cap_until_GC = 0;
+  size_t aligned_inc = align_size_down((size_t) inc, Metaspace::commit_alignment());
+  bool success = MetaspaceGC::inc_capacity_until_GC(aligned_inc, &new_cap_until_GC);
+  if (!success) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(),
+                "WB_IncMetaspaceCapacityUntilGC: could not increase capacity until GC "
+                "due to contention with another thread");
+  }
+  return (jlong) new_cap_until_GC;
+WB_END
+
+WB_ENTRY(jlong, WB_MetaspaceCapacityUntilGC(JNIEnv* env, jobject wb))
+  return (jlong) MetaspaceGC::capacity_until_GC();
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -626,17 +934,58 @@
   return ret;
 }
 
+void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, JNINativeMethod* method_array, int method_count) {
+  ResourceMark rm;
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+
+  //  one by one registration natives for exception catching
+  jclass no_such_method_error_klass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+  CHECK_JNI_EXCEPTION(env);
+  for (int i = 0, n = method_count; i < n; ++i) {
+    // Skip dummy entries
+    if (method_array[i].fnPtr == NULL) continue;
+    if (env->RegisterNatives(wbclass, &method_array[i], 1) != 0) {
+      jthrowable throwable_obj = env->ExceptionOccurred();
+      if (throwable_obj != NULL) {
+        env->ExceptionClear();
+        if (env->IsInstanceOf(throwable_obj, no_such_method_error_klass)) {
+          // NoSuchMethodError is thrown when a method can't be found or a method is not native.
+          // Ignoring the exception since it is not preventing use of other WhiteBox methods.
+          tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s",
+              method_array[i].name, method_array[i].signature);
+        }
+      } else {
+        // Registration failed unexpectedly.
+        tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered",
+            method_array[i].name, method_array[i].signature);
+        env->UnregisterNatives(wbclass);
+        break;
+      }
+    }
+  }
+}
 
 #define CC (char*)
 
 static JNINativeMethod methods[] = {
   {CC"getObjectAddress",   CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress  },
+  {CC"getObjectSize",      CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize     },
+  {CC"isObjectInOldGen",   CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen  },
   {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
   {CC"isClassAlive0",      CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
+  {CC"classKnownToNotExist",
+                           CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)Z",(void*)&WB_ClassKnownToNotExist},
+  {CC"getLookupCacheURLs", CC"(Ljava/lang/ClassLoader;)[Ljava/net/URL;",    (void*)&WB_GetLookupCacheURLs},
+  {CC"getLookupCacheMatches", CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)[I",
+                                                      (void*)&WB_GetLookupCacheMatches},
   {CC"parseCommandLine",
       CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
+  {CC"addToBootstrapClassLoaderSearch", CC"(Ljava/lang/String;)V",
+                                                      (void*)&WB_AddToBootstrapClassLoaderSearch},
+  {CC"addToSystemClassLoaderSearch",    CC"(Ljava/lang/String;)V",
+                                                      (void*)&WB_AddToSystemClassLoaderSearch},
   {CC"getCompressedOopsMaxHeapSize", CC"()J",
       (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
@@ -651,13 +1000,15 @@
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
+  {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
   {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
-  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
   {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
+  {CC"NMTChangeTrackingLevel", CC"()Z",               (void*)&WB_NMTChangeTrackingLevel},
+  {CC"NMTGetHashSize",      CC"()I",                  (void*)&WB_NMTGetHashSize     },
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
   {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
@@ -684,9 +1035,35 @@
       CC"(Ljava/lang/reflect/Executable;II)Z",        (void*)&WB_EnqueueMethodForCompilation},
   {CC"clearMethodState",
       CC"(Ljava/lang/reflect/Executable;)V",          (void*)&WB_ClearMethodState},
-  {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
+  {CC"setBooleanVMFlag",   CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
+  {CC"setIntxVMFlag",      CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
+  {CC"setUintxVMFlag",     CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},
+  {CC"setUint64VMFlag",    CC"(Ljava/lang/String;J)V",(void*)&WB_SetUint64VMFlag},
+  {CC"setDoubleVMFlag",    CC"(Ljava/lang/String;D)V",(void*)&WB_SetDoubleVMFlag},
+  {CC"setStringVMFlag",    CC"(Ljava/lang/String;Ljava/lang/String;)V",
+                                                      (void*)&WB_SetStringVMFlag},
+  {CC"getBooleanVMFlag",   CC"(Ljava/lang/String;)Ljava/lang/Boolean;",
+                                                      (void*)&WB_GetBooleanVMFlag},
+  {CC"getIntxVMFlag",      CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetIntxVMFlag},
+  {CC"getUintxVMFlag",     CC"(Ljava/lang/String;)Ljava/lang/Long;",
                                                       (void*)&WB_GetUintxVMFlag},
+  {CC"getUint64VMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetUint64VMFlag},
+  {CC"getDoubleVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Double;",
+                                                      (void*)&WB_GetDoubleVMFlag},
+  {CC"getStringVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/String;",
+                                                      (void*)&WB_GetStringVMFlag},
+  {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
+  {CC"youngGC",  CC"()V",                             (void*)&WB_YoungGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
+  {CC"allocateMetaspace",
+     CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
+  {CC"freeMetaspace",
+     CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
+  {CC"incMetaspaceCapacityUntilGC", CC"(J)J",         (void*)&WB_IncMetaspaceCapacityUntilGC },
+  {CC"metaspaceCapacityUntilGC", CC"()J",             (void*)&WB_MetaspaceCapacityUntilGC },
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
@@ -701,35 +1078,9 @@
     instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
     Handle loader(ikh->class_loader());
     if (loader.is_null()) {
-      ResourceMark rm;
-      ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
-      bool result = true;
-      //  one by one registration natives for exception catching
-      jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
-      CHECK_JNI_EXCEPTION(env);
-      for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
-        if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
-          result = false;
-          jthrowable throwable_obj = env->ExceptionOccurred();
-          if (throwable_obj != NULL) {
-            env->ExceptionClear();
-            if (env->IsInstanceOf(throwable_obj, exceptionKlass)) {
-              // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
-              // ignoring the exception
-              tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
-            }
-          } else {
-            // register is failed w/o exception or w/ unexpected exception
-            tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
-            env->UnregisterNatives(wbclass);
-            break;
-          }
-        }
-      }
-
-      if (result) {
-        WhiteBox::set_used();
-      }
+      WhiteBox::register_methods(env, wbclass, thread, methods, sizeof(methods) / sizeof(methods[0]));
+      WhiteBox::register_extended(env, wbclass, thread);
+      WhiteBox::set_used();
     }
   }
 }
--- ./hotspot/src/share/vm/prims/whitebox.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/prims/whitebox.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -29,6 +29,8 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/interfaceSupport.hpp"
 
 // Entry macro to transition from JNI to VM state.
@@ -62,6 +64,11 @@
                               Symbol* signature_symbol);
   static const char* lookup_jstring(const char* field_name, oop object);
   static bool lookup_bool(const char* field_name, oop object);
+
+  static int array_bytes_to_length(size_t bytes);
+  static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
+    JNINativeMethod* method_array, int method_count);
+  static void register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread);
 };
 
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/src/share/vm/prims/whitebox_ext.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "prims/whitebox.hpp"
+
+void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) { }
--- ./hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -75,11 +75,14 @@
 
 // update_rate() is called from select_task() while holding a compile queue lock.
 void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
-  JavaThread* THREAD = JavaThread::current();
+  // Skip update if counters are absent.
+  // Can't allocate them since we are holding compile queue lock.
+  if (m->method_counters() == NULL) return;
+
   if (is_old(m)) {
     // We don't remove old methods from the queue,
     // so we can just zero the rate.
-    m->set_rate(0, THREAD);
+    m->set_rate(0);
     return;
   }
 
@@ -95,14 +98,15 @@
   if (delta_s >= TieredRateUpdateMinTime) {
     // And we must've taken the previous point at least 1ms before.
     if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
-      m->set_prev_time(t, THREAD);
-      m->set_prev_event_count(event_count, THREAD);
-      m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond
-    } else
+      m->set_prev_time(t);
+      m->set_prev_event_count(event_count);
+      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
+    } else {
       if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
         // If nothing happened for 25ms, zero the rate. Don't modify prev values.
-        m->set_rate(0, THREAD);
+        m->set_rate(0);
       }
+    }
   }
 }
 
@@ -164,7 +168,6 @@
   for (CompileTask* task = compile_queue->first(); task != NULL;) {
     CompileTask* next_task = task->next();
     Method* method = task->method();
-    MethodData* mdo = method->method_data();
     update_rate(t, method);
     if (max_task == NULL) {
      max_task = task;
@@ -175,8 +178,7 @@
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
      }
-      CompileTaskWrapper ctw(task); // Frees the task
-      compile_queue->remove(task);
+      compile_queue->remove_and_mark_stale(task);
      method->clear_queued_for_compilation();
      task = next_task;
      continue;
@@ -314,8 +316,8 @@
 *   c. 0 -> (3->2) -> 4.
 *      In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *      to enable the profiling to fully occur at level 0. In this case we change the compilation level
- *      of the method to 2, because it'll allow it to run much faster without full profiling while c2
- *      is compiling.
+ *      of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
+ *      without full profiling while c2 is compiling.
 *
 *   d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *      After a method was once compiled with C1 it can be identified as trivial and be compiled to
@@ -449,7 +451,7 @@
     if (should_create_mdo(mh(), level)) {
       create_mdo(mh, thread);
     }
-    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
       CompLevel next_level = call_event(mh(), level);
       if (next_level != level) {
         compile(mh, InvocationEntryBci, next_level, thread);
@@ -473,7 +475,7 @@
     CompLevel next_osr_level = loop_event(imh(), level);
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
-    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
+    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
       compile(imh, bci, next_osr_level, thread);
     }
 
@@ -507,7 +509,7 @@
         nm->make_not_entrant();
       }
     }
-    if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+    if (!CompileBroker::compilation_is_in_queue(mh)) {
       // Fix up next_level if necessary to avoid deopts
       if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
         next_level = CompLevel_full_profile;
@@ -519,7 +521,7 @@
   } else {
     cur_level = comp_level(imh());
     next_level = call_event(imh(), cur_level);
-    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+    if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
      compile(imh, InvocationEntryBci, next_level, thread);
    }
  }
--- ./hotspot/src/share/vm/runtime/arguments.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/runtime/arguments.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -23,6 +23,7 @@
 */
 
 #include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/symbolTable.hpp"
 #include "compiler/compilerOracle.hpp"
@@ -34,12 +35,14 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/arguments_ext.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/stringUtils.hpp"
 #include "utilities/taskqueue.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
@@ -98,6 +101,8 @@
 bool   Arguments::_has_profile        = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size      = 0;
+uintx  Arguments::_min_heap_free_ratio = 0;
+uintx  Arguments::_max_heap_free_ratio = 0;
 Arguments::Mode Arguments::_mode      = _mixed;
 bool   Arguments::_java_compiler      = false;
 bool   Arguments::_xdebug_mode        = false;
@@ -294,6 +299,8 @@
   { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "CompilationRepeat",             JDK_Version::jdk(8), JDK_Version::jdk(9) },
 #ifdef PRODUCT
   { "DesiredMethodLimit",
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -1108,11 +1115,11 @@
 
 // Conflict: required to use shared spaces (-Xshare:on), but
 // incompatible command line options were chosen.
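
Stepping back to the AdvancedThresholdPolicy::update_rate() hunk above: the bookkeeping reduces to "events per millisecond since the last accepted sample". A simplified standalone sketch of that arithmetic (illustrative field and parameter names, and it omits the extra gate on time since the counters were reset that the real code also applies):

    #include <cstdio>

    struct MethodCounters {
      long   prev_time = 0;         // ms timestamp of the previous sample
      long   prev_event_count = 0;  // invocations + backedges at that time
      double rate = 0.0;            // events per millisecond
    };

    static void update_rate(long now_ms, long event_count, MethodCounters& mc,
                            long min_update_ms = 1, long max_idle_ms = 25) {
      long delta_t = now_ms - mc.prev_time;
      long delta_e = event_count - mc.prev_event_count;
      if (delta_t >= min_update_ms && delta_e > 0) {
        mc.prev_time = now_ms;
        mc.prev_event_count = event_count;
        mc.rate = (double)delta_e / (double)delta_t;
      } else if (delta_t > max_idle_ms && delta_e == 0) {
        mc.rate = 0.0;              // nothing happened for a while: zero the rate
      }
    }

    int main() {
      MethodCounters mc;
      update_rate(/*now_ms*/50, /*event_count*/500, mc);
      std::printf("rate=%.1f events/ms\n", mc.rate);  // prints 10.0
      return 0;
    }

The patch's real change here is who owns this state: it moves from the Method (requiring a THREAD to allocate) to MethodCounters, which is why update_rate() can now bail out early when counters are absent rather than allocating under the compile queue lock.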
-static void no_shared_spaces() {
+static void no_shared_spaces(const char* message) {
   if (RequireSharedSpaces) {
     jio_fprintf(defaultStream::error_stream(),
       "Class data sharing is inconsistent with other specified options.\n");
-    vm_exit_during_initialization("Unable to use shared archive.", NULL);
+    vm_exit_during_initialization("Unable to use shared archive.", message);
   } else {
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
@@ -1138,6 +1145,32 @@
   }
 }
 
+/**
+ * Returns the minimum number of compiler threads needed to run the JVM. The following
+ * configurations are possible.
+ *
+ * 1) The JVM is built using an interpreter only. As a result, the minimum number of
+ *    compiler threads is 0.
+ * 2) The JVM is built using the compiler(s) and tiered compilation is disabled. As
+ *    a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
+ * 3) The JVM is built using the compiler(s) and tiered compilation is enabled. However,
+ *    the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only
+ *    C1 can be used, so the minimum number of compiler threads is 1.
+ * 4) The JVM is built using the compilers and tiered compilation is enabled. The option
+ *    'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result,
+ *    the minimum number of compiler threads is 2.
+ */
+int Arguments::get_min_number_of_compiler_threads() {
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
+  return 0;   // case 1
+#else
+  if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
+    return 1; // case 2 or case 3
+  }
+  return 2;   // case 4 (tiered)
+#endif
+}
+
 #if INCLUDE_ALL_GCS
 static void disable_adaptive_size_policy(const char* collector_name) {
   if (UseAdaptiveSizePolicy) {
@@ -1398,10 +1431,26 @@
             (int)ObjectAlignmentInBytes, os::vm_page_size());
     return false;
   }
+  if(SurvivorAlignmentInBytes == 0) {
+    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
+  } else {
+    if (!is_power_of_2(SurvivorAlignmentInBytes)) {
+      jio_fprintf(defaultStream::error_stream(),
+            "error: SurvivorAlignmentInBytes=%d must be power of 2\n",
+            (int)SurvivorAlignmentInBytes);
+      return false;
+    }
+    if (SurvivorAlignmentInBytes < ObjectAlignmentInBytes) {
+      jio_fprintf(defaultStream::error_stream(),
+          "error: SurvivorAlignmentInBytes=%d must be greater than ObjectAlignmentInBytes=%d \n",
+          (int)SurvivorAlignmentInBytes, (int)ObjectAlignmentInBytes);
+      return false;
+    }
+  }
   return true;
 }
 
-uintx Arguments::max_heap_for_compressed_oops() {
+size_t Arguments::max_heap_for_compressed_oops() {
   // Avoid sign flip.
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
   // We need to fit both the NULL page and the heap into the memory budget, while
@@ -1505,28 +1554,31 @@
     heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
   }
 #endif // INCLUDE_ALL_GCS
-  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
-    CollectorPolicy::compute_heap_alignment());
+  _conservative_max_heap_alignment = MAX4(heap_alignment,
+                                          (size_t)os::vm_allocation_granularity(),
+                                          os::max_page_size(),
+                                          CollectorPolicy::compute_heap_alignment());
 }
 
-void Arguments::set_ergonomics_flags() {
-
+void Arguments::select_gc_ergonomically() {
   if (os::is_server_class_machine()) {
-    // If no other collector is requested explicitly,
-    // let the VM select the collector based on
-    // machine class and automatic selection policy.
-    if (!UseSerialGC &&
-        !UseConcMarkSweepGC &&
-        !UseG1GC &&
-        !UseParNewGC &&
-        FLAG_IS_DEFAULT(UseParallelGC)) {
-      if (should_auto_select_low_pause_collector()) {
-        FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
-      } else {
-        FLAG_SET_ERGO(bool, UseParallelGC, true);
-      }
+    if (should_auto_select_low_pause_collector()) {
+      FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+    } else {
+      FLAG_SET_ERGO(bool, UseParallelGC, true);
     }
   }
+}
+
+void Arguments::select_gc() {
+  if (!gc_selected()) {
+    ArgumentsExt::select_gc_ergonomically();
+  }
+}
+
+void Arguments::set_ergonomics_flags() {
+  select_gc();
+
 #ifdef COMPILER2
   // Shared spaces work fine with other GCs but causes bytecode rewriting
   // to be disabled, which hurts interpreter performance and decreases
@@ -1535,7 +1587,7 @@
   // at link time, or rewrite bytecodes in non-shared methods.
   if (!DumpSharedSpaces && !RequireSharedSpaces &&
       (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
-    no_shared_spaces();
+    no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
   }
 #endif
 
@@ -1578,9 +1630,11 @@
   // unless the user actually sets these flags.
   if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
     FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+    _min_heap_free_ratio = MinHeapFreeRatio;
   }
   if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
     FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+    _max_heap_free_ratio = MaxHeapFreeRatio;
   }
 }
 
@@ -1619,6 +1673,12 @@
                 Abstract_VM_Version::parallel_worker_threads());
   }
 
+#if INCLUDE_ALL_GCS
+  if (G1ConcRefinementThreads == 0) {
+    FLAG_SET_DEFAULT(G1ConcRefinementThreads, ParallelGCThreads);
+  }
+#endif
+
   // MarkStackSize will be set (if it hasn't been set by the user)
   // when concurrent marking is initialized.
   // Its value will be based upon the number of parallel marking threads.
@@ -1644,6 +1704,46 @@
   }
 }
 
+#if !INCLUDE_ALL_GCS
+#ifdef ASSERT
+static bool verify_serial_gc_flags() {
+  return (UseSerialGC &&
+        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+          UseParallelGC || UseParallelOldGC));
+}
+#endif // ASSERT
+#endif // INCLUDE_ALL_GCS
+
+void Arguments::set_gc_specific_flags() {
+#if INCLUDE_ALL_GCS
+  // Set per-collector flags
+  if (UseParallelGC || UseParallelOldGC) {
+    set_parallel_gc_flags();
+  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
+    set_cms_and_parnew_gc_flags();
+  } else if (UseParNewGC) { // Skipped if CMS is set above
+    set_parnew_gc_flags();
+  } else if (UseG1GC) {
+    set_g1_gc_flags();
+  }
+  check_deprecated_gcs();
+  check_deprecated_gc_flags();
+  if (AssumeMP && !UseSerialGC) {
+    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+      warning("If the number of processors is expected to increase from one, then"
+              " you should configure the number of parallel GC threads appropriately"
+              " using -XX:ParallelGCThreads=N");
+    }
+  }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
+#else // INCLUDE_ALL_GCS
+  assert(verify_serial_gc_flags(), "SerialGC unset");
+#endif // INCLUDE_ALL_GCS
+}
+
 julong Arguments::limit_by_allocatable_memory(julong limit) {
   julong max_allocatable;
   julong result = limit;
@@ -1867,16 +1967,6 @@
   return false;
 }
 
-#if !INCLUDE_ALL_GCS
-#ifdef ASSERT
-static bool verify_serial_gc_flags() {
-  return (UseSerialGC &&
-        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
-          UseParallelGC || UseParallelOldGC));
-}
-#endif // ASSERT
-#endif // INCLUDE_ALL_GCS
-
 // check if do gclog rotation
 // +UseGCLogFileRotation is a must,
 // no gc log rotation when log file not supplied or
@@ -1955,6 +2045,8 @@
               MaxHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _min_heap_free_ratio = min_heap_free_ratio;
   return true;
 }
 
@@ -1969,11 +2061,13 @@
               MinHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _max_heap_free_ratio = max_heap_free_ratio;
   return true;
 }
 
 // Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
+bool Arguments::check_gc_consistency_user() {
   check_gclog_consistency();
   bool status = true;
   // Ensure that the user has not selected conflicting sets
@@ -2139,7 +2233,7 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && check_gc_consistency();
+  status = status && check_gc_consistency_user();
   status = status && check_stack_pages();
 
   if (CMSIncrementalMode) {
@@ -2322,10 +2416,13 @@
   status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio");
   status = status && verify_min_value(MarkSweepAlwaysCompactCount, 1, "MarkSweepAlwaysCompactCount");
+#ifdef COMPILER1
+  status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
+#endif
 
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
-    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
+    if (MemTracker::tracking_level() == NMT_off) {
 #endif // INCLUDE_NMT
       warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
       PrintNMTStatistics = false;
@@ -2391,6 +2488,12 @@
     status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
 #endif
 
+  int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
+  // The default CICompilerCount's value is CI_COMPILER_COUNT.
+ assert(min_number_of_compiler_threads <= CI_COMPILER_COUNT, "minimum should be less or equal default number"); + // Check the minimum number of compiler threads + status &=verify_min_value(CICompilerCount, min_number_of_compiler_threads, "CICompilerCount"); + return status; } @@ -2859,6 +2962,23 @@ #endif // -D } else if (match_option(option, "-D", &tail)) { + if (CheckEndorsedAndExtDirs) { + if (match_option(option, "-Djava.endorsed.dirs=", &tail)) { + // abort if -Djava.endorsed.dirs is set + jio_fprintf(defaultStream::output_stream(), + "-Djava.endorsed.dirs will not be supported in a future release.\n" + "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n"); + return JNI_EINVAL; + } + if (match_option(option, "-Djava.ext.dirs=", &tail)) { + // abort if -Djava.ext.dirs is set + jio_fprintf(defaultStream::output_stream(), + "-Djava.ext.dirs will not be supported in a future release.\n" + "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n"); + return JNI_EINVAL; + } + } + if (!add_property(tail)) { return JNI_ENOMEM; } @@ -3226,6 +3346,15 @@ } } + // PrintSharedArchiveAndExit will turn on + // -Xshare:on + // -XX:+TraceClassPaths + if (PrintSharedArchiveAndExit) { + FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); + FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true); + FLAG_SET_CMDLINE(bool, TraceClassPaths, true); + } + // Change the default value for flags which have different default values // when working with older JDKs. #ifdef LINUX @@ -3234,9 +3363,195 @@ FLAG_SET_DEFAULT(UseLinuxPosixThreadCPUClocks, false); } #endif // LINUX + fix_appclasspath(); return JNI_OK; } +// Remove all empty paths from the app classpath (if IgnoreEmptyClassPaths is enabled) +// +// This is necessary because some apps like to specify classpath like -cp foo.jar:${XYZ}:bar.jar +// in their start-up scripts. If XYZ is empty, the classpath will look like "-cp foo.jar::bar.jar". +// Java treats such empty paths as if the user specified "-cp foo.jar:.:bar.jar". I.e., an empty +// path is treated as the current directory. +// +// This causes problems with CDS, which requires that all directories specified in the classpath +// must be empty. In most cases, applications do NOT want to load classes from the current +// directory anyway. Adding -XX:+IgnoreEmptyClassPaths will make these applications' start-up +// scripts compatible with CDS. 
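As a self-contained sketch of the collapsing that fix_appclasspath() performs below (plain C++ rather than the VM's StringUtils::replace_no_expand(); collapse_empty_paths is a hypothetical name, and the trimming of leading and trailing empty paths done by the real code is omitted):

    #include <string>

    // "foo.jar::bar.jar" with separator ':' becomes "foo.jar:bar.jar";
    // repeated separators are collapsed one pair at a time, mirroring the
    // "keep replacing :: -> :" loop in fix_appclasspath().
    std::string collapse_empty_paths(std::string cp, char sep) {
      const std::string from(2, sep);   // "::" on POSIX, ";;" on Windows
      const std::string to(1, sep);
      std::string::size_type pos;
      while ((pos = cp.find(from)) != std::string::npos) {
        cp.replace(pos, from.size(), to);
      }
      return cp;
    }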
+void Arguments::fix_appclasspath() { + if (IgnoreEmptyClassPaths) { + const char separator = *os::path_separator(); + const char* src = _java_class_path->value(); + + // skip over all the leading empty paths + while (*src == separator) { + src ++; + } + + char* copy = AllocateHeap(strlen(src) + 1, mtInternal); + strncpy(copy, src, strlen(src) + 1); + + // trim all trailing empty paths + for (char* tail = copy + strlen(copy) - 1; tail >= copy && *tail == separator; tail--) { + *tail = '\0'; + } + + char from[3] = {separator, separator, '\0'}; + char to [2] = {separator, '\0'}; + while (StringUtils::replace_no_expand(copy, from, to) > 0) { + // Keep replacing "::" -> ":" until we have no more "::" (non-windows) + // Keep replacing ";;" -> ";" until we have no more ";;" (windows) + } + + _java_class_path->set_value(copy); + FreeHeap(copy); // a copy was made by set_value, so don't need this anymore + } + + if (!PrintSharedArchiveAndExit) { + ClassLoader::trace_class_path("[classpath: ", _java_class_path->value()); + } +} + +static bool has_jar_files(const char* directory) { + DIR* dir = os::opendir(directory); + if (dir == NULL) return false; + + struct dirent *entry; + char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal); + bool hasJarFile = false; + while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { + const char* name = entry->d_name; + const char* ext = name + strlen(name) - 4; + hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0); + } + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); + os::closedir(dir); + return hasJarFile ; +} + +// returns the number of directories in the given path containing JAR files +// If the skip argument is not NULL, it will skip that directory +static int check_non_empty_dirs(const char* path, const char* type, const char* skip) { + const char separator = *os::path_separator(); + const char* const end = path + strlen(path); + int nonEmptyDirs = 0; + while (path < end) { + const char* tmp_end = strchr(path, separator); + if (tmp_end == NULL) { + if ((skip == NULL || strcmp(path, skip) != 0) && has_jar_files(path)) { + nonEmptyDirs++; + jio_fprintf(defaultStream::output_stream(), + "Non-empty %s directory: %s\n", type, path); + } + path = end; + } else { + char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal); + memcpy(dirpath, path, tmp_end - path); + dirpath[tmp_end - path] = '\0'; + if ((skip == NULL || strcmp(dirpath, skip) != 0) && has_jar_files(dirpath)) { + nonEmptyDirs++; + jio_fprintf(defaultStream::output_stream(), + "Non-empty %s directory: %s\n", type, dirpath); + } + FREE_C_HEAP_ARRAY(char, dirpath, mtInternal); + path = tmp_end + 1; + } + } + return nonEmptyDirs; +} + +// Returns true if endorsed standards override mechanism and extension mechanism +// are not used. 
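A stripped-down version of the ".jar" suffix test used by has_jar_files() above (illustrative only: std::strcmp() is always case-sensitive, while the VM's os::file_name_strcmp() may ignore case depending on the platform):

    #include <cstring>

    // True when 'name' is longer than ".jar" and ends with that suffix,
    // matching the 'ext > name' guard in has_jar_files().
    static bool looks_like_jar(const char* name) {
      const std::size_t len = std::strlen(name);
      return len > 4 && std::strcmp(name + len - 4, ".jar") == 0;
    }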
+static bool check_endorsed_and_ext_dirs() { + if (!CheckEndorsedAndExtDirs) + return true; + + char endorsedDir[JVM_MAXPATHLEN]; + char extDir[JVM_MAXPATHLEN]; + const char* fileSep = os::file_separator(); + jio_snprintf(endorsedDir, sizeof(endorsedDir), "%s%slib%sendorsed", + Arguments::get_java_home(), fileSep, fileSep); + jio_snprintf(extDir, sizeof(extDir), "%s%slib%sext", + Arguments::get_java_home(), fileSep, fileSep); + + // check endorsed directory + int nonEmptyDirs = check_non_empty_dirs(Arguments::get_endorsed_dir(), "endorsed", NULL); + + // check the extension directories but skip the default lib/ext directory + nonEmptyDirs += check_non_empty_dirs(Arguments::get_ext_dirs(), "extension", extDir); + + // List of JAR files installed in the default lib/ext directory. + // -XX:+CheckEndorsedAndExtDirs checks if any non-JDK file installed + static const char* jdk_ext_jars[] = { + "access-bridge-32.jar", + "access-bridge-64.jar", + "access-bridge.jar", + "cldrdata.jar", + "dnsns.jar", + "jaccess.jar", + "jfxrt.jar", + "localedata.jar", + "nashorn.jar", + "sunec.jar", + "sunjce_provider.jar", + "sunmscapi.jar", + "sunpkcs11.jar", + "ucrypto.jar", + "zipfs.jar", + NULL + }; + + // check if the default lib/ext directory has any non-JDK jar files; if so, error + DIR* dir = os::opendir(extDir); + if (dir != NULL) { + int num_ext_jars = 0; + struct dirent *entry; + char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(extDir), mtInternal); + while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { + const char* name = entry->d_name; + const char* ext = name + strlen(name) - 4; + if (ext > name && (os::file_name_strcmp(ext, ".jar") == 0)) { + bool is_jdk_jar = false; + const char* jarfile = NULL; + for (int i=0; (jarfile = jdk_ext_jars[i]) != NULL; i++) { + if (os::file_name_strcmp(name, jarfile) == 0) { + is_jdk_jar = true; + break; + } + } + if (!is_jdk_jar) { + jio_fprintf(defaultStream::output_stream(), + "%s installed in /lib/ext\n", name); + num_ext_jars++; + } + } + } + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); + os::closedir(dir); + if (num_ext_jars > 0) { + nonEmptyDirs += 1; + } + } + + // check if the default lib/endorsed directory exists; if so, error + dir = os::opendir(endorsedDir); + if (dir != NULL) { + jio_fprintf(defaultStream::output_stream(), "/lib/endorsed exists\n"); + os::closedir(dir); + nonEmptyDirs += 1; + } + + if (nonEmptyDirs > 0) { + jio_fprintf(defaultStream::output_stream(), + "Endorsed standards override mechanism and extension mechanism " + "will not be supported in a future release.\n" + "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n"); + return false; + } + + return true; +} + jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) { // This must be done after all -D arguments have been processed. scp_p->expand_endorsed(); @@ -3246,6 +3561,10 @@ Arguments::set_sysclasspath(scp_p->combined_path()); } + if (!check_endorsed_and_ext_dirs()) { + return JNI_ERR; + } + // This must be done after all arguments have been processed. // java_compiler() true means set to "NONE" or empty. if (java_compiler() && !xdebug_mode()) { @@ -3407,9 +3726,8 @@ "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL); } } else { - // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces. 
if (!UseCompressedOops || !UseCompressedClassPointers) { - no_shared_spaces(); + no_shared_spaces("UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces."); } #endif } @@ -3493,6 +3811,8 @@ bool settings_file_specified = false; bool needs_hotspotrc_warning = false; + ArgumentsExt::process_options(args); + const char* flags_file; int index; for (index = 0; index < args->nOptions; index++) { @@ -3517,15 +3837,24 @@ CommandLineFlags::printFlags(tty, false); vm_exit(0); } +#if INCLUDE_NMT if (match_option(option, "-XX:NativeMemoryTracking", &tail)) { -#if INCLUDE_NMT - MemTracker::init_tracking_options(tail); -#else - jio_fprintf(defaultStream::error_stream(), - "Native Memory Tracking is not supported in this VM\n"); - return JNI_ERR; + // The launcher did not set up the NMT environment variable properly. + if (!MemTracker::check_launcher_nmt_support(tail)) { + warning("Native Memory Tracking did not set up properly, using wrong launcher?"); + } + + // Verify that the NMT option is valid. + if (MemTracker::verify_nmt_option()) { + // Late initialization, still in single-threaded mode. + if (MemTracker::tracking_level() >= NMT_summary) { + MemTracker::init(); + } + } else { + vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL); + } + } #endif - } #ifndef PRODUCT @@ -3663,7 +3992,7 @@ FLAG_SET_DEFAULT(UseSharedSpaces, false); FLAG_SET_DEFAULT(PrintSharedSpaces, false); } - no_shared_spaces(); + no_shared_spaces("CDS Disabled"); #endif // INCLUDE_CDS return JNI_OK; @@ -3677,7 +4006,7 @@ set_shared_spaces_flags(); // Check the GC selections again. - if (!check_gc_consistency()) { + if (!ArgumentsExt::check_gc_consistency_ergo()) { return JNI_EINVAL; } @@ -3699,33 +4028,7 @@ // Set heap size based on available physical memory set_heap_size(); -#if INCLUDE_ALL_GCS - // Set per-collector flags - if (UseParallelGC || UseParallelOldGC) { - set_parallel_gc_flags(); - } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below - set_cms_and_parnew_gc_flags(); - } else if (UseParNewGC) { // Skipped if CMS is set above - set_parnew_gc_flags(); - } else if (UseG1GC) { - set_g1_gc_flags(); - } - check_deprecated_gcs(); - check_deprecated_gc_flags(); - if (AssumeMP && !UseSerialGC) { - if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) { - warning("If the number of processors is expected to increase from one, then" - " you should configure the number of parallel GC threads appropriately" - " using -XX:ParallelGCThreads=N"); - } - } - if (MinHeapFreeRatio == 100) { - // Keeping the heap 100% free is hard ;-) so limit it to 99%. - FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99); - } -#else // INCLUDE_ALL_GCS - assert(verify_serial_gc_flags(), "SerialGC unset"); -#endif // INCLUDE_ALL_GCS + ArgumentsExt::set_gc_specific_flags(); // Initialize Metaspace flags and alignments.
Metaspace::ergo_initialize(); @@ -3781,10 +4084,6 @@ // nothing to use the profiling, turn it off FLAG_SET_DEFAULT(TypeProfileLevel, 0); } - if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) { - // Doing the replace in parent maps helps speculation - FLAG_SET_DEFAULT(ReplaceInParentMaps, true); - } #endif if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) { @@ -3842,18 +4141,24 @@ } jint Arguments::adjust_after_os() { -#if INCLUDE_ALL_GCS - if (UseParallelGC || UseParallelOldGC) { - if (UseNUMA) { + if (UseNUMA) { + if (UseParallelGC || UseParallelOldGC) { if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) { - FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M); + FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M); } - // For those collectors or operating systems (eg, Windows) that do - // not support full UseNUMA, we will map to UseNUMAInterleaving for now - UseNUMAInterleaving = true; + } + // UseNUMAInterleaving is set to ON for all collectors and + // platforms when UseNUMA is set to ON. NUMA-aware collectors + // such as the parallel collector for Linux and Solaris will + // interleave old gen and survivor spaces on top of NUMA + // allocation policy for the eden space. + // Non NUMA-aware collectors such as CMS, G1 and Serial-GC on + // all platforms and ParallelGC on Windows will interleave all + // of the heap spaces across NUMA nodes. + if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) { + FLAG_SET_ERGO(bool, UseNUMAInterleaving, true); } } -#endif // INCLUDE_ALL_GCS return JNI_OK; } --- ./hotspot/src/share/vm/runtime/arguments.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/arguments.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -284,7 +284,11 @@ // Value of the conservative maximum heap alignment needed static size_t _conservative_max_heap_alignment; - static uintx _min_heap_size; + static uintx _min_heap_size; + + // Used to store original flag values + static uintx _min_heap_free_ratio; + static uintx _max_heap_free_ratio; // -Xrun arguments static AgentLibraryList _libraryList; @@ -323,6 +327,7 @@ // Tiered static void set_tiered_flags(); + static int get_min_number_of_compiler_threads(); // CMS/ParNew garbage collectors static void set_parnew_gc_flags(); static void set_cms_and_parnew_gc_flags(); @@ -334,6 +339,7 @@ static void set_conservative_max_heap_alignment(); static void set_use_compressed_oops(); static void set_use_compressed_klass_ptrs(); + static void select_gc(); static void set_ergonomics_flags(); static void set_shared_spaces_flags(); // limits the given memory size by the maximum amount of memory this process is @@ -447,6 +453,10 @@ // Adjusts the arguments after the OS has adjusted the arguments static jint adjust_after_os(); + static void set_gc_specific_flags(); + static inline bool gc_selected(); // whether a gc has been selected + static void select_gc_ergonomically(); + // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error // message is returned in the provided buffer. static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio); @@ -456,7 +466,8 @@ static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio); // Check for consistency in the selection of the garbage collector.
- static bool check_gc_consistency(); + static bool check_gc_consistency_user(); // Check user-selected gc + static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc static void check_deprecated_gcs(); static void check_deprecated_gc_flags(); // Check consistency or otherwise of VM argument settings @@ -513,6 +524,10 @@ static uintx min_heap_size() { return _min_heap_size; } static void set_min_heap_size(uintx v) { _min_heap_size = v; } + // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio + static uintx min_heap_free_ratio() { return _min_heap_free_ratio; } + static uintx max_heap_free_ratio() { return _max_heap_free_ratio; } + // -Xrun static AgentLibrary* libraries() { return _libraryList.first(); } static bool init_libraries_at_startup() { return !_libraryList.is_empty(); } @@ -576,18 +591,32 @@ _meta_index_dir = meta_index_dir; } - static char *get_java_home() { return _java_home->value(); } - static char *get_dll_dir() { return _sun_boot_library_path->value(); } - static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); } - static char *get_sysclasspath() { return _sun_boot_class_path->value(); } + static char* get_java_home() { return _java_home->value(); } + static char* get_dll_dir() { return _sun_boot_library_path->value(); } + static char* get_endorsed_dir() { return _java_endorsed_dirs->value(); } + static char* get_sysclasspath() { return _sun_boot_class_path->value(); } static char* get_meta_index_path() { return _meta_index_path; } static char* get_meta_index_dir() { return _meta_index_dir; } + static char* get_ext_dirs() { return _java_ext_dirs->value(); } + static char* get_appclasspath() { return _java_class_path->value(); } + static void fix_appclasspath(); // Operation modi - static Mode mode() { return _mode; } + static Mode mode() { return _mode; } + static bool is_interpreter_only() { return mode() == _int; } + // Utility: copies src into buf, replacing "%%" with "%" and "%p" with pid. static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen); }; +bool Arguments::gc_selected() { + return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC || + UseParNewGC || UseSerialGC; +} + +bool Arguments::check_gc_consistency_ergo() { + return check_gc_consistency_user(); +} + #endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/arguments_ext.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP +#define SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP + +#include "memory/allocation.hpp" +#include "runtime/arguments.hpp" + +class ArgumentsExt: AllStatic { +public: + static inline void select_gc_ergonomically(); + static inline void set_gc_specific_flags(); + static inline bool check_gc_consistency_ergo(); + static void process_options(const JavaVMInitArgs* args) {} +}; + +void ArgumentsExt::select_gc_ergonomically() { + Arguments::select_gc_ergonomically(); +} + +void ArgumentsExt::set_gc_specific_flags() { + Arguments::set_gc_specific_flags(); +} + +bool ArgumentsExt::check_gc_consistency_ergo() { + return Arguments::check_gc_consistency_ergo(); +} + +#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP --- ./hotspot/src/share/vm/runtime/deoptimization.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/deoptimization.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -213,6 +213,8 @@ assert(vf->is_compiled_frame(), "Wrong frame type"); chunk->push(compiledVFrame::cast(vf)); + bool realloc_failures = false; + #ifdef COMPILER2 // Reallocate the non-escaping objects and restore their fields. Then // relock objects if synchronization on them was eliminated. @@ -228,7 +230,8 @@ // It is not guaranteed that we can get such information here only // by analyzing bytecode in deoptimized frames. This is why this flag // is set during method compilation (see Compile::Process_OopMap_Node()). - bool save_oop_result = chunk->at(0)->scope()->return_oop(); + // If the previous frame was popped, we don't have a result. + bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution(); Handle return_value; if (save_oop_result) { // Reallocation may trigger GC. If deoptimization happened on return from @@ -242,19 +245,16 @@ tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread); } } - bool reallocated = false; if (objects != NULL) { JRT_BLOCK - reallocated = realloc_objects(thread, &deoptee, objects, THREAD); + realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD); JRT_END - } - if (reallocated) { - reassign_fields(&deoptee, &map, objects); + reassign_fields(&deoptee, &map, objects, realloc_failures); #ifndef PRODUCT if (TraceDeoptimization) { ttyLocker ttyl; tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); - print_objects(objects); + print_objects(objects, realloc_failures); } #endif } @@ -272,7 +272,7 @@ assert (cvf->scope() != NULL,"expect only compiled java frames"); GrowableArray* monitors = cvf->monitors(); if (monitors->is_nonempty()) { - relock_objects(monitors, thread); + relock_objects(monitors, thread, realloc_failures); #ifndef PRODUCT if (TraceDeoptimization) { ttyLocker ttyl; @@ -283,7 +283,12 @@ first = false; tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); } - tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner()); + if (mi->owner_is_scalar_replaced()) { + Klass* k = java_lang_Class::as_Klass(mi->owner_klass()); + tty->print_cr(" failed reallocation for klass %s", k->external_name()); + } else { + tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner()); + } } } } @@ -298,9 +303,14 @@ // out the java state residing in the vframeArray will be missed. 
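The error-handling shape introduced in the changes that follow, reduced to a sketch: instead of asserting that every scalar-replaced object can be reallocated, each allocation may fail, the failure is recorded, and unwinding continues so the affected frames can be popped later. Object, try_allocate(), and reallocate_all() are hypothetical stand-ins, not HotSpot code:

    #include <cstddef>

    struct Object {};                                   // stand-in for a heap oop
    static Object* try_allocate(int) { return NULL; }   // pretend every allocation hits OOM

    // Attempt every reallocation, remember any failure, and keep going so
    // the caller can decide to pop the deoptimized frames afterwards.
    static bool reallocate_all(Object** objs, int n) {
      bool failures = false;
      for (int i = 0; i < n; i++) {
        objs[i] = try_allocate(i);
        if (objs[i] == NULL) {
          failures = true;   // record, but do not bail out early
        }
      }
      return failures;
    }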
No_Safepoint_Verifier no_safepoint; - vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk); + vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures); +#ifdef COMPILER2 + if (realloc_failures) { + pop_frames_failed_reallocs(thread, array); + } +#endif - assert(thread->vframe_array_head() == NULL, "Pending deopt!");; + assert(thread->vframe_array_head() == NULL, "Pending deopt!"); thread->set_vframe_array_head(array); // Now that the vframeArray has been created if we have any deferred local writes @@ -698,7 +708,7 @@ (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size - top_frame_expression_stack_adjustment))) || (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) || - (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) && + (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) && (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size)) )) { ttyLocker ttyl; @@ -752,6 +762,8 @@ int exception_line = thread->exception_line(); thread->clear_pending_exception(); + bool failures = false; + for (int i = 0; i < objects->length(); i++) { assert(objects->at(i)->is_object(), "invalid debug information"); ObjectValue* sv = (ObjectValue*) objects->at(i); @@ -761,27 +773,34 @@ if (k->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(k()); - obj = ik->allocate_instance(CHECK_(false)); + obj = ik->allocate_instance(THREAD); } else if (k->oop_is_typeArray()) { TypeArrayKlass* ak = TypeArrayKlass::cast(k()); assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); int len = sv->field_size() / type2size[ak->element_type()]; - obj = ak->allocate(len, CHECK_(false)); + obj = ak->allocate(len, THREAD); } else if (k->oop_is_objArray()) { ObjArrayKlass* ak = ObjArrayKlass::cast(k()); - obj = ak->allocate(sv->field_size(), CHECK_(false)); + obj = ak->allocate(sv->field_size(), THREAD); } - assert(obj != NULL, "allocation failed"); + if (obj == NULL) { + failures = true; + } + assert(sv->value().is_null(), "redundant reallocation"); + assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); + CLEAR_PENDING_EXCEPTION; sv->set_value(obj); } - if (pending_exception.not_null()) { + if (failures) { + THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures); + } else if (pending_exception.not_null()) { thread->set_pending_exception(pending_exception(), exception_file, exception_line); } - return true; + return failures; } // This assumes that the fields are stored in ObjectValue in the same order @@ -919,12 +938,15 @@ // restore fields of all eliminated objects and arrays -void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects) { +void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures) { for (int i = 0; i < objects->length(); i++) { ObjectValue* sv = (ObjectValue*) objects->at(i); KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); Handle obj = sv->value(); - assert(obj.not_null(), "reallocation was missed"); + assert(obj.not_null() || realloc_failures, "reallocation was missed"); + if (obj.is_null()) { + continue; + } if (k->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(k()); @@ 
-941,34 +963,36 @@ // relock objects for which synchronization was eliminated -void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) { +void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) { for (int i = 0; i < monitors->length(); i++) { MonitorInfo* mon_info = monitors->at(i); if (mon_info->eliminated()) { - assert(mon_info->owner() != NULL, "reallocation was missed"); - Handle obj = Handle(mon_info->owner()); - markOop mark = obj->mark(); - if (UseBiasedLocking && mark->has_bias_pattern()) { - // New allocated objects may have the mark set to anonymously biased. - // Also the deoptimized method may called methods with synchronization - // where the thread-local object is bias locked to the current thread. - assert(mark->is_biased_anonymously() || - mark->biased_locker() == thread, "should be locked to current thread"); - // Reset mark word to unbiased prototype. - markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); - obj->set_mark(unbiased_prototype); + assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed"); + if (!mon_info->owner_is_scalar_replaced()) { + Handle obj = Handle(mon_info->owner()); + markOop mark = obj->mark(); + if (UseBiasedLocking && mark->has_bias_pattern()) { + // Newly allocated objects may have the mark set to anonymously biased. + // Also the deoptimized method may have called methods with synchronization + // where the thread-local object is bias locked to the current thread. + assert(mark->is_biased_anonymously() || + mark->biased_locker() == thread, "should be locked to current thread"); + // Reset mark word to unbiased prototype. + markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); + obj->set_mark(unbiased_prototype); + } + BasicLock* lock = mon_info->lock(); + ObjectSynchronizer::slow_enter(obj, lock, thread); + assert(mon_info->owner()->is_locked(), "object must be locked now"); } - BasicLock* lock = mon_info->lock(); - ObjectSynchronizer::slow_enter(obj, lock, thread); } - assert(mon_info->owner()->is_locked(), "object must be locked now"); } } #ifndef PRODUCT // print information about reallocated objects -void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) { +void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) { fieldDescriptor fd; for (int i = 0; i < objects->length(); i++) { @@ -978,10 +1002,15 @@ tty->print(" object <" INTPTR_FORMAT "> of type ", (void *)sv->value()()); k->print_value(); - tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); + assert(obj.not_null() || realloc_failures, "reallocation was missed"); + if (obj.is_null()) { + tty->print(" allocation failed"); + } else { + tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); + } tty->cr(); - if (Verbose) { + if (Verbose && !obj.is_null()) { k->oop_print_on(obj(), tty); } } @@ -989,7 +1018,7 @@ #endif #endif // COMPILER2 -vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) { +vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) { Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp()); #ifndef PRODUCT @@ -1032,7 +1061,7 @@ // Since the Java thread being deoptimized will eventually adjust its own stack, // the vframeArray containing the unpacking information is allocated in the C
heap. // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). - vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr); + vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures); // Compare the vframeArray to the collected vframes assert(array->structural_compare(thread, chunk), "just checking"); @@ -1047,6 +1076,33 @@ return array; } +#ifdef COMPILER2 +void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) { + // Reallocation of some scalar replaced objects failed. Record + // that we need to pop all the interpreter frames for the + // deoptimized compiled frame. + assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?"); + thread->set_frames_to_pop_failed_realloc(array->frames()); + // Unlock all monitors here otherwise the interpreter will see a + // mix of locked and unlocked monitors (because of failed + // reallocations of synchronized objects) and be confused. + for (int i = 0; i < array->frames(); i++) { + MonitorChunk* monitors = array->element(i)->monitors(); + if (monitors != NULL) { + for (int j = 0; j < monitors->number_of_monitors(); j++) { + BasicObjectLock* src = monitors->at(j); + if (src->obj() != NULL) { + ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread); + } + } + array->element(i)->free_monitors(thread); +#ifdef ASSERT + array->element(i)->set_removed_monitors(); +#endif + } + } +} +#endif static void collect_monitors(compiledVFrame* cvf, GrowableArray* objects_to_revoke) { GrowableArray* monitors = cvf->monitors(); @@ -1835,7 +1891,8 @@ "predicate", "loop_limit_check", "speculate_class_check", - "rtm_state_change" + "rtm_state_change", + "unstable_if" }; const char* Deoptimization::_trap_action_name[Action_LIMIT] = { // Note: Keep this in sync. with enum DeoptAction. --- ./hotspot/src/share/vm/runtime/deoptimization.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/deoptimization.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -61,6 +61,7 @@ Reason_loop_limit_check, // compiler generated loop limits check failed Reason_speculate_class_check, // saw unexpected object class from type speculation Reason_rtm_state_change, // rtm state change detected + Reason_unstable_if, // a branch predicted always false was taken Reason_LIMIT, // Note: Keep this enum in sync. with _trap_reason_name. 
Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc @@ -119,13 +120,14 @@ static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS); static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type); static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj); - static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects); - static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread); - NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);) + static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures); + static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures); + static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array); + NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);) #endif // COMPILER2 public: - static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk); + static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures); // Interface used for unpacking deoptimized frames @@ -315,6 +317,8 @@ return Reason_null_check; // recorded per BCI as a null check else if (reason == Reason_speculate_class_check) return Reason_class_check; + else if (reason == Reason_unstable_if) + return Reason_intrinsic; else return Reason_none; } --- ./hotspot/src/share/vm/runtime/fprofiler.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/fprofiler.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -38,6 +38,7 @@ #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/task.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "utilities/macros.hpp" --- ./hotspot/src/share/vm/runtime/frame.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/frame.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -900,7 +900,7 @@ } -void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, +void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache) { assert(is_interpreted_frame(), "Not an interpreted frame"); assert(map != NULL, "map must be set"); @@ -1140,7 +1140,7 @@ } -void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) { +void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) { #ifndef PRODUCT // simulate GC crash here to dump java thread in error report if (CrashGCForDumpingJavaThread) { --- ./hotspot/src/share/vm/runtime/frame.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/frame.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -419,19 +419,19 @@ // Oops-do's void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f); - void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true); + void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true); private: void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); //
Iteration of oops - void oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache); + void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache); void oops_entry_do(OopClosure* f, const RegisterMap* map); void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map); int adjust_offset(Method* method, int index); // helper for above fn public: // Memory management - void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); } + void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); } void nmethods_do(CodeBlobClosure* cf); // RedefineClasses support for finding live interpreted methods on the stack --- ./hotspot/src/share/vm/runtime/globals.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/globals.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -243,6 +243,11 @@ return is_unlocked_ext(); } +void Flag::unlock_diagnostic() { + assert(is_diagnostic(), "sanity"); + _flags = Flags(_flags & ~KIND_DIAGNOSTIC); +} + // Get custom message for this locked flag, or return NULL if // none is available. void Flag::get_locked_message(char* buf, int buflen) const { @@ -611,7 +616,7 @@ e.commit(); } -bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) { +bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_bool()) return false; @@ -619,7 +624,7 @@ return true; } -bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin) { +bool CommandLineFlags::boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_bool()) return false; @@ -639,7 +644,7 @@ faddr->set_origin(origin); } -bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) { +bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_intx()) return false; @@ -647,7 +652,7 @@ return true; } -bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin) { +bool CommandLineFlags::intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_intx()) return false; @@ -667,7 +672,7 @@ faddr->set_origin(origin); } -bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) { +bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_uintx()) return false; @@ -675,7 +680,7 @@ return true; } -bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin) { +bool CommandLineFlags::uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_uintx()) return false; @@ -695,7 +700,7 @@ faddr->set_origin(origin); } -bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) { +bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) 
{ Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_uint64_t()) return false; @@ -703,7 +708,7 @@ return true; } -bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin) { +bool CommandLineFlags::uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_uint64_t()) return false; @@ -723,7 +728,7 @@ faddr->set_origin(origin); } -bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) { +bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_double()) return false; @@ -731,7 +736,7 @@ return true; } -bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin) { +bool CommandLineFlags::doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_double()) return false; @@ -751,7 +756,7 @@ faddr->set_origin(origin); } -bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) { +bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_ccstr()) return false; @@ -759,9 +764,7 @@ return true; } -// Contract: Flag will make private copy of the incoming value. -// Outgoing value is always malloc-ed, and caller MUST call free. -bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin) { +bool CommandLineFlags::ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin) { Flag* result = Flag::find_flag(name, len); if (result == NULL) return false; if (!result->is_ccstr()) return false; @@ -784,7 +787,6 @@ return true; } -// Contract: Flag will make private copy of the incoming value. 
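One practical effect of the const char* signatures above: callers can now pass string literals without a cast. A hypothetical caller (print_max_heap() is illustrative, not VM code; tty and UINTX_FORMAT are the VM's usual output stream and format macro):

    // Look up -XX:MaxHeapSize through the typed accessor; uintxAt() returns
    // false when the name is unknown or the flag is not a uintx.
    static void print_max_heap() {
      uintx value = 0;
      if (CommandLineFlags::uintxAt("MaxHeapSize", &value)) {
        tty->print_cr("MaxHeapSize = " UINTX_FORMAT, value);
      }
    }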
void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) { Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type"); --- ./hotspot/src/share/vm/runtime/globals.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/globals.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -313,6 +313,8 @@ bool is_writeable_ext() const; bool is_external_ext() const; + void unlock_diagnostic(); + void get_locked_message(char*, int) const; void get_locked_message_ext(char*, int) const; @@ -363,35 +365,37 @@ class CommandLineFlags { public: - static bool boolAt(char* name, size_t len, bool* value); - static bool boolAt(char* name, bool* value) { return boolAt(name, strlen(name), value); } - static bool boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin); - static bool boolAtPut(char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } + static bool boolAt(const char* name, size_t len, bool* value); + static bool boolAt(const char* name, bool* value) { return boolAt(name, strlen(name), value); } + static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin); + static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); } - static bool intxAt(char* name, size_t len, intx* value); - static bool intxAt(char* name, intx* value) { return intxAt(name, strlen(name), value); } - static bool intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin); - static bool intxAtPut(char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } + static bool intxAt(const char* name, size_t len, intx* value); + static bool intxAt(const char* name, intx* value) { return intxAt(name, strlen(name), value); } + static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin); + static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); } - static bool uintxAt(char* name, size_t len, uintx* value); - static bool uintxAt(char* name, uintx* value) { return uintxAt(name, strlen(name), value); } - static bool uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin); - static bool uintxAtPut(char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } + static bool uintxAt(const char* name, size_t len, uintx* value); + static bool uintxAt(const char* name, uintx* value) { return uintxAt(name, strlen(name), value); } + static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin); + static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); } - static bool uint64_tAt(char* name, size_t len, uint64_t* value); - static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); } - static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin); - static bool uint64_tAtPut(char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } + static bool uint64_tAt(const char* name, size_t len, uint64_t* value); + static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); } + static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin); + static bool 
uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); } - static bool doubleAt(char* name, size_t len, double* value); - static bool doubleAt(char* name, double* value) { return doubleAt(name, strlen(name), value); } - static bool doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin); - static bool doubleAtPut(char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } + static bool doubleAt(const char* name, size_t len, double* value); + static bool doubleAt(const char* name, double* value) { return doubleAt(name, strlen(name), value); } + static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin); + static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); } - static bool ccstrAt(char* name, size_t len, ccstr* value); - static bool ccstrAt(char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); } - static bool ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin); - static bool ccstrAtPut(char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } + static bool ccstrAt(const char* name, size_t len, ccstr* value); + static bool ccstrAt(const char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); } + // Contract: Flag will make private copy of the incoming value. + // Outgoing value is always malloc-ed, and caller MUST call free. + static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin); + static bool ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); } // Returns false if name is not a command line flag. static bool wasSetOnCmdline(const char* name, bool* value); @@ -595,6 +599,9 @@ product(bool, UseAES, false, \ "Control whether AES instructions can be used on x86/x64") \ \ + product(bool, UseSHA, false, \ + "Control whether SHA instructions can be used on SPARC") \ + \ product(uintx, LargePageSizeInBytes, 0, \ "Large page size (0 to let VM choose the page size)") \ \ @@ -701,6 +708,15 @@ product(bool, UseAESIntrinsics, false, \ "Use intrinsics for AES versions of crypto") \ \ + product(bool, UseSHA1Intrinsics, false, \ + "Use intrinsics for SHA-1 crypto hash function") \ + \ + product(bool, UseSHA256Intrinsics, false, \ + "Use intrinsics for SHA-224 and SHA-256 crypto hash functions") \ + \ + product(bool, UseSHA512Intrinsics, false, \ + "Use intrinsics for SHA-384 and SHA-512 crypto hash functions") \ + \ product(bool, UseCRC32Intrinsics, false, \ "use intrinsics for java.util.zip.CRC32") \ \ @@ -929,11 +945,6 @@ diagnostic(bool, PrintNMTStatistics, false, \ "Print native memory tracking summary data if it is on") \ \ - diagnostic(bool, AutoShutdownNMT, true, \ - "Automatically shutdown native memory tracking under stress " \ - "situations. 
When set to false, native memory tracking tries to " \ - "stay alive at the expense of JVM performance") \ - \ diagnostic(bool, LogCompilation, false, \ "Log compilation activity in detail to LogFile") \ \ @@ -1062,6 +1073,9 @@ product(bool, ClassUnloading, true, \ "Do unloading of classes") \ \ + product(bool, ClassUnloadingWithConcurrentMark, true, \ + "Do unloading of classes with a concurrent marking cycle") \ + \ develop(bool, DisableStartThread, false, \ "Disable starting of additional Java threads " \ "(for debugging only)") \ @@ -1196,9 +1210,17 @@ product(bool, CheckJNICalls, false, \ "Verify all arguments to JNI calls") \ \ + product(bool, CheckEndorsedAndExtDirs, false, \ + "Verify the endorsed and extension directories are not used") \ + \ product(bool, UseFastJNIAccessors, true, \ "Use optimized versions of GetField") \ \ + product(intx, MaxJNILocalCapacity, 65536, \ + "Maximum allowable local JNI handle capacity to " \ + "EnsureLocalCapacity() and PushLocalFrame(), " \ + "where <= 0 is unlimited, default: 65536") \ + \ product(bool, EagerXrunInit, false, \ "Eagerly initialize -Xrun libraries; allows startup profiling, " \ "but not all -Xrun libraries may support the state of the VM " \ @@ -1923,6 +1945,10 @@ "not just one of the generations (e.g., G1). A value of 0 " \ "denotes 'do constant GC cycles'.") \ \ + manageable(intx, CMSTriggerInterval, -1, \ + "Commence a CMS collection cycle (at least) every so many " \ + "milliseconds (0 permanently, -1 disabled)") \ + \ product(bool, UseCMSInitiatingOccupancyOnly, false, \ "Only use occupancy as a criterion for starting a CMS collection")\ \ @@ -2287,6 +2313,9 @@ manageable(bool, PrintGCTimeStamps, false, \ "Print timestamps at garbage collection") \ \ + manageable(bool, PrintGCID, false, \ + "Print an identifier for each garbage collection") \ + \ product(bool, PrintGCTaskTimeStamps, false, \ "Print timestamps for individual gc worker thread tasks") \ \ @@ -2310,6 +2339,12 @@ notproduct(bool, TraceScavenge, false, \ "Trace scavenge") \ \ + product(bool, IgnoreEmptyClassPaths, false, \ + "Ignore empty path elements in -classpath") \ + \ + product(bool, TraceClassPaths, false, \ + "Trace processing of class paths") \ + \ product_rw(bool, TraceClassLoading, false, \ "Trace all classes loaded") \ \ @@ -3760,6 +3795,13 @@ product(bool, PrintSharedSpaces, false, \ "Print usage of shared spaces") \ \ + product(bool, PrintSharedArchiveAndExit, false, \ + "Print shared archive file contents") \ + \ + product(bool, PrintSharedDictionary, false, \ + "If PrintSharedArchiveAndExit is true, also print the shared " \ + "dictionary") \ + \ product(uintx, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M), \ "Size of read-write space for metadata (in bytes)") \ \ @@ -3780,6 +3822,10 @@ "support JSR 292 (method handles, invokedynamic, " \ "anonymous classes") \ \ + diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \ + "Do not quit -Xshare:dump even if we encounter unverifiable " \ + "classes. 
Just exclude them from the shared dictionary.") \ + \ diagnostic(bool, PrintMethodHandleStubs, false, \ "Print generated stub code for method handles") \ \ @@ -3869,12 +3915,25 @@ product(bool, PrintGCCause, true, \ "Include GC cause in GC logging") \ \ + experimental(intx, SurvivorAlignmentInBytes, 0, \ + "Default survivor space alignment in bytes") \ + \ product(bool , AllowNonVirtualCalls, false, \ "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ \ + product(ccstr, DumpLoadedClassList, NULL, \ + "Dump the names all loaded classes, that could be stored into " \ + "the CDS archive, in the specified file") \ + \ + product(ccstr, SharedClassListFile, NULL, \ + "Override the default CDS class list") \ + \ diagnostic(ccstr, SharedArchiveFile, NULL, \ "Override the default location of the CDS archive file") \ \ + product(ccstr, ExtraSharedClassListFile, NULL, \ + "Extra classlist for building the CDS archive file") \ + \ experimental(uintx, ArrayAllocatorMallocLimit, \ SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \ "Allocation less than this value will be allocated " \ --- ./hotspot/src/share/vm/runtime/globals_extension.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/globals_extension.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -201,6 +201,7 @@ static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin); static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin); static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin); + // Contract: Flag will make private copy of the incoming value static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin); static bool is_default(CommandLineFlag flag); --- ./hotspot/src/share/vm/runtime/handles.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/handles.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -227,7 +227,7 @@ HandleArea* _prev; // link to outer (older) area public: // Constructor - HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) { + HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) { debug_only(_handle_mark_nesting = 0); debug_only(_no_handle_mark_nesting = 0); _prev = prev; --- ./hotspot/src/share/vm/runtime/init.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/init.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,10 @@ #include "runtime/init.hpp" #include "runtime/safepoint.hpp" #include "runtime/sharedRuntime.hpp" +#include "services/memTracker.hpp" #include "utilities/macros.hpp" + // Initialization done by VM thread in vm_init_globals() void check_ThreadShadow(); void eventlog_init(); @@ -131,6 +133,12 @@ javaClasses_init(); // must happen after vtable initialization stubRoutines_init2(); // note: StubRoutines need 2-phase init +#if INCLUDE_NMT + // Solaris stack is walkable only after stubRoutines are set up. 
+ // On Other platforms, the stack is always walkable. + NMT_stack_walkable = true; +#endif // INCLUDE_NMT + // All the flags that get adjusted by VM_Version_init and os::init_2 // have been set so dump the flags now. if (PrintFlagsFinal) { --- ./hotspot/src/share/vm/runtime/interfaceSupport.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/interfaceSupport.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -30,6 +30,7 @@ #include "memory/resourceArea.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/threadLocalStorage.hpp" #include "runtime/vframe.hpp" #include "utilities/preserveException.hpp" @@ -84,7 +85,7 @@ // Short-circuit any possible re-entrant gc-a-lot attempt if (thread->skip_gcalot()) return; - if (is_init_completed()) { + if (Threads::is_vm_complete()) { if (++_fullgc_alot_invocation < FullGCALotStart) { return; --- ./hotspot/src/share/vm/runtime/java.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/java.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -57,7 +57,6 @@ #include "runtime/thread.inline.hpp" #include "runtime/timer.hpp" #include "runtime/vm_operations.hpp" -#include "services/memReporter.hpp" #include "services/memTracker.hpp" #include "trace/tracing.hpp" #include "utilities/dtrace.hpp" @@ -364,12 +363,7 @@ #endif // ENABLE_ZAP_DEAD_LOCALS // Native memory tracking data if (PrintNMTStatistics) { - if (MemTracker::is_on()) { - BaselineTTYOutputer outputer(tty); - MemTracker::print_memory_usage(outputer, K, false); - } else { - tty->print_cr("%s", MemTracker::reason()); - } + MemTracker::final_report(tty); } } @@ -401,12 +395,7 @@ // Native memory tracking data if (PrintNMTStatistics) { - if (MemTracker::is_on()) { - BaselineTTYOutputer outputer(tty); - MemTracker::print_memory_usage(outputer, K, false); - } else { - tty->print_cr("%s", MemTracker::reason()); - } + MemTracker::final_report(tty); } } @@ -555,10 +544,6 @@ BeforeExit_lock->notify_all(); } - // Shutdown NMT before exit. Otherwise, - // it will run into trouble when system destroys static variables. - MemTracker::shutdown(MemTracker::NMT_normal); - if (VerifyStringTableAtExit) { int fail_cnt = 0; { --- ./hotspot/src/share/vm/runtime/javaCalls.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/javaCalls.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -308,6 +308,10 @@ } void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) { + // During dumping, Java execution environment is not fully initialized. Also, Java execution + // may cause undesirable side-effects in the class metadata. 
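[Note on the java.cpp hunks above: the two PrintNMTStatistics branches collapse into a single MemTracker::final_report(tty) call. A stand-in sketch of the simplified exit path; the types below are placeholders, and only final_report's role is taken from the diff.]

#include <cstdio>

// Before this change the caller had to distinguish "NMT on" (print a
// baseline report) from "NMT off" (print the reason); final_report() now
// encapsulates that decision behind one call.
struct MemTrackerSketch {
  static bool enabled;                       // stand-in for the NMT state
  static void final_report(FILE* out) {
    if (enabled) {
      fprintf(out, "Native Memory Tracking:\n...\n");  // summary would go here
    }
    // when tracking is off, the report is simply skipped
  }
};
bool MemTrackerSketch::enabled = false;

void before_exit(FILE* out, bool print_nmt_statistics) {
  if (print_nmt_statistics) MemTrackerSketch::final_report(out);
}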
+ assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping"); + methodHandle method = *m; JavaThread* thread = (JavaThread*)THREAD; assert(thread->is_Java_thread(), "must be called by a java thread"); --- ./hotspot/src/share/vm/runtime/javaFrameAnchor.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/javaFrameAnchor.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,39 +26,7 @@ #define SHARE_VM_RUNTIME_JAVAFRAMEANCHOR_HPP #include "utilities/globalDefinitions.hpp" -#ifdef TARGET_OS_ARCH_linux_x86 -# include "orderAccess_linux_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_sparc -# include "orderAccess_linux_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_zero -# include "orderAccess_linux_zero.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_x86 -# include "orderAccess_solaris_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_solaris_sparc -# include "orderAccess_solaris_sparc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_windows_x86 -# include "orderAccess_windows_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_arm -# include "orderAccess_linux_arm.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_linux_ppc -# include "orderAccess_linux_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_aix_ppc -# include "orderAccess_aix_ppc.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_x86 -# include "orderAccess_bsd_x86.inline.hpp" -#endif -#ifdef TARGET_OS_ARCH_bsd_zero -# include "orderAccess_bsd_zero.inline.hpp" -#endif +#include "runtime/orderAccess.inline.hpp" // // An object for encapsulating the machine/os dependent part of a JavaThread frame state --- ./hotspot/src/share/vm/runtime/mutex.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/mutex.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "runtime/mutex.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/thread.inline.hpp" #include "utilities/events.hpp" --- ./hotspot/src/share/vm/runtime/mutexLocker.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/mutexLocker.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -69,7 +69,7 @@ Monitor* SerializePage_lock = NULL; Monitor* Threads_lock = NULL; Monitor* CGC_lock = NULL; -Mutex* STS_init_lock = NULL; +Monitor* STS_lock = NULL; Monitor* SLT_lock = NULL; Monitor* iCMS_lock = NULL; Monitor* FullGCCount_lock = NULL; @@ -173,7 +173,7 @@ def(tty_lock , Mutex , event, true ); // allow to lock in VM def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC - def(STS_init_lock , Mutex, leaf, true ); + def(STS_lock , Monitor, leaf, true ); if (UseConcMarkSweepGC) { def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification } --- ./hotspot/src/share/vm/runtime/mutexLocker.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/mutexLocker.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -79,7 +79,7 @@ // (also used by Safepoints too to block threads creation/destruction) extern Monitor* CGC_lock; // used for coordination between // fore- & background GC threads. -extern Mutex* STS_init_lock; // coordinate initialization of SuspendibleThreadSets. +extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet. 
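[Note on the mutexLocker hunks above: STS_init_lock, a plain Mutex that only guarded SuspendibleThreadSet initialization, becomes STS_lock, a Monitor, because joining and leaving the set needs wait/notify in addition to mutual exclusion. A simplified, self-contained model of that join/leave/synchronize protocol using standard C++ primitives; the real code uses HotSpot's Monitor and is more involved, e.g. threads can yield mid-operation rather than fully leave.]

#include <condition_variable>
#include <mutex>

// Illustrative suspendible-thread-set: worker threads block in join()
// while a safepoint-style synchronize() is in progress.
class SuspendibleSetSketch {
  std::mutex              _m;    // plays the role of STS_lock
  std::condition_variable _cv;
  bool _suspend_all = false;
  int  _nthreads = 0;
public:
  void join() {
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [&]{ return !_suspend_all; }); // wait out an active synchronize()
    ++_nthreads;
  }
  void leave() {
    std::unique_lock<std::mutex> l(_m);
    --_nthreads;
    _cv.notify_all();                          // a synchronizer may be waiting on us
  }
  void synchronize() {                         // stop-the-world style barrier
    std::unique_lock<std::mutex> l(_m);
    _suspend_all = true;
    _cv.wait(l, [&]{ return _nthreads == 0; });
  }
  void desynchronize() {
    std::unique_lock<std::mutex> l(_m);
    _suspend_all = false;
    _cv.notify_all();
  }
};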
extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc --- ./hotspot/src/share/vm/runtime/objectMonitor.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/objectMonitor.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/orderAccess.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2014 SAP AG. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP +#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP + +#include "runtime/orderAccess.hpp" + +// Linux +#ifdef TARGET_OS_ARCH_linux_x86 +# include "orderAccess_linux_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_sparc +# include "orderAccess_linux_sparc.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_zero +# include "orderAccess_linux_zero.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_arm +# include "orderAccess_linux_arm.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_ppc +# include "orderAccess_linux_ppc.inline.hpp" +#endif + +// Solaris +#ifdef TARGET_OS_ARCH_solaris_x86 +# include "orderAccess_solaris_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_solaris_sparc +# include "orderAccess_solaris_sparc.inline.hpp" +#endif + +// Windows +#ifdef TARGET_OS_ARCH_windows_x86 +# include "orderAccess_windows_x86.inline.hpp" +#endif + +// AIX +#ifdef TARGET_OS_ARCH_aix_ppc +# include "orderAccess_aix_ppc.inline.hpp" +#endif + +// BSD +#ifdef TARGET_OS_ARCH_bsd_x86 +# include "orderAccess_bsd_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_bsd_zero +# include "orderAccess_bsd_zero.inline.hpp" +#endif + +#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP --- ./hotspot/src/share/vm/runtime/os.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/os.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -32,6 +32,9 @@ #include "gc_implementation/shared/vmGCOperations.hpp" #include "interpreter/interpreter.hpp" #include "memory/allocation.inline.hpp" +#ifdef ASSERT +#include "memory/guardedMemory.hpp" +#endif #include "oops/oop.inline.hpp" #include "prims/jvm.h" #include "prims/jvm_misc.hpp" @@ -46,6 +49,8 @@ #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/attachListener.hpp" +#include "services/nmtCommon.hpp" +#include "services/mallocTracker.hpp" #include "services/memTracker.hpp" #include "services/threadService.hpp" #include "utilities/defaultStream.hpp" @@ -524,118 +529,16 @@ -#ifdef ASSERT -#define space_before (MallocCushion + sizeof(double)) -#define space_after MallocCushion -#define size_addr_from_base(p) (size_t*)(p + space_before - sizeof(size_t)) -#define size_addr_from_obj(p) ((size_t*)p - 1) -// MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly -// NB: cannot be debug variable, because these aren't set from the command line until -// *after* the first few allocs already happened -#define MallocCushion 16 -#else -#define space_before 0 -#define space_after 0 -#define size_addr_from_base(p) should not use w/o ASSERT -#define size_addr_from_obj(p) should not use w/o ASSERT -#define MallocCushion 0 -#endif #define paranoid 0 /* only set to 1 if you suspect checking code has bug */ #ifdef ASSERT -inline size_t get_size(void* obj) { - size_t size = *size_addr_from_obj(obj); - if (size < 0) { - fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten (" - SIZE_FORMAT ")", obj, size)); - } - return size; -} - -u_char* find_cushion_backwards(u_char* start) { - u_char* p = start; - while (p[ 0] != badResourceValue || p[-1] != badResourceValue || - p[-2] != badResourceValue || p[-3] != badResourceValue) p--; - // ok, we have four consecutive marker bytes; find start - u_char* q = p - 4; - while (*q == badResourceValue) q--; - return q + 1; -} - -u_char* find_cushion_forwards(u_char* start) { - u_char* p = start; - while (p[0] != badResourceValue || p[1] != badResourceValue || - p[2] != badResourceValue || p[3] != badResourceValue) p++; - // 
ok, we have four consecutive marker bytes; find end of cushion - u_char* q = p + 4; - while (*q == badResourceValue) q++; - return q - MallocCushion; -} - -void print_neighbor_blocks(void* ptr) { - // find block allocated before ptr (not entirely crash-proof) - if (MallocCushion < 4) { - tty->print_cr("### cannot find previous block (MallocCushion < 4)"); - return; - } - u_char* start_of_this_block = (u_char*)ptr - space_before; - u_char* end_of_prev_block_data = start_of_this_block - space_after -1; - // look for cushion in front of prev. block - u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data); - ptrdiff_t size = *size_addr_from_base(start_of_prev_block); - u_char* obj = start_of_prev_block + space_before; - if (size <= 0 ) { - // start is bad; mayhave been confused by OS data inbetween objects - // search one more backwards - start_of_prev_block = find_cushion_backwards(start_of_prev_block); - size = *size_addr_from_base(start_of_prev_block); - obj = start_of_prev_block + space_before; - } - - if (start_of_prev_block + space_before + size + space_after == start_of_this_block) { - tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size); - } else { - tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size); - } - - // now find successor block - u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after; - start_of_next_block = find_cushion_forwards(start_of_next_block); - u_char* next_obj = start_of_next_block + space_before; - ptrdiff_t next_size = *size_addr_from_base(start_of_next_block); - if (start_of_next_block[0] == badResourceValue && - start_of_next_block[1] == badResourceValue && - start_of_next_block[2] == badResourceValue && - start_of_next_block[3] == badResourceValue) { - tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size); - } else { - tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size); - } -} - - -void report_heap_error(void* memblock, void* bad, const char* where) { - tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees); - tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock); - print_neighbor_blocks(memblock); - fatal("memory stomping error"); -} - -void verify_block(void* memblock) { - size_t size = get_size(memblock); - if (MallocCushion) { - u_char* ptr = (u_char*)memblock - space_before; - for (int i = 0; i < MallocCushion; i++) { - if (ptr[i] != badResourceValue) { - report_heap_error(memblock, ptr+i, "in front of"); - } - } - u_char* end = (u_char*)memblock + size + space_after; - for (int j = -MallocCushion; j < 0; j++) { - if (end[j] != badResourceValue) { - report_heap_error(memblock, end+j, "after"); - } - } +static void verify_memory(void* ptr) { + GuardedMemory guarded(ptr); + if (!guarded.verify_guards()) { + tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees); + tty->print_cr("## memory stomp:"); + guarded.print_on(tty); + fatal("memory stomping error"); } } #endif @@ -660,7 +563,11 @@ return ptr; } -void* os::malloc(size_t size, MEMFLAGS memflags, address caller) { +void* os::malloc(size_t size, MEMFLAGS flags) { + return os::malloc(size, flags, CALLER_PC); +} + +void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { 
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); @@ -686,16 +593,22 @@ size = 1; } - const size_t alloc_size = size + space_before + space_after; + // NMT support + NMT_TrackingLevel level = MemTracker::tracking_level(); + size_t nmt_header_size = MemTracker::malloc_header_size(level); - if (size > alloc_size) { // Check for rollover. +#ifndef ASSERT + const size_t alloc_size = size + nmt_header_size; +#else + const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size); + if (size + nmt_header_size > alloc_size) { // Check for rollover. return NULL; } +#endif NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); u_char* ptr; - if (MallocMaxTestWords > 0) { ptr = testMalloc(alloc_size); } else { @@ -703,67 +616,73 @@ } #ifdef ASSERT - if (ptr == NULL) return NULL; - if (MallocCushion) { - for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue; - u_char* end = ptr + space_before + size; - for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad; - for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue; + if (ptr == NULL) { + return NULL; } - // put size just before data - *size_addr_from_base(ptr) = size; + // Wrap memory with guard + GuardedMemory guarded(ptr, size + nmt_header_size); + ptr = guarded.get_user_ptr(); #endif - u_char* memblock = ptr + space_before; - if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { - tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock); + if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { + tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr); breakpoint(); } - debug_only(if (paranoid) verify_block(memblock)); - if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock); + debug_only(if (paranoid) verify_memory(ptr)); + if (PrintMalloc && tty != NULL) { + tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr); + } - // we do not track MallocCushion memory - MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller); - - return memblock; + // we do not track guard memory + return MemTracker::record_malloc((address)ptr, size, memflags, stack, level); } +void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { + return os::realloc(memblock, size, flags, CALLER_PC); +} -void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) { +void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { + #ifndef ASSERT NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); - MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); - void* ptr = ::realloc(memblock, size); - if (ptr != NULL) { - tkr.record((address)memblock, (address)ptr, size, memflags, - caller == 0 ? CALLER_PC : caller); - } else { - tkr.discard(); - } - return ptr; + // NMT support + void* membase = MemTracker::record_free(memblock); + NMT_TrackingLevel level = MemTracker::tracking_level(); + size_t nmt_header_size = MemTracker::malloc_header_size(level); + void* ptr = ::realloc(membase, size + nmt_header_size); + return MemTracker::record_malloc(ptr, size, memflags, stack, level); #else if (memblock == NULL) { - return malloc(size, memflags, (caller == 0 ? 
CALLER_PC : caller)); + return os::malloc(size, memflags, stack); } if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { tty->print_cr("os::realloc caught " PTR_FORMAT, memblock); breakpoint(); } - verify_block(memblock); + // NMT support + void* membase = MemTracker::malloc_base(memblock); + verify_memory(membase); NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); - if (size == 0) return NULL; + if (size == 0) { + return NULL; + } // always move the block - void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller); - if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr); + void* ptr = os::malloc(size, memflags, stack); + if (PrintMalloc) { + tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr); + } // Copy to new memory if malloc didn't fail if ( ptr != NULL ) { - memcpy(ptr, memblock, MIN2(size, get_size(memblock))); - if (paranoid) verify_block(ptr); + GuardedMemory guarded(MemTracker::malloc_base(memblock)); + // Guard's user data contains NMT header + size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock); + memcpy(ptr, memblock, MIN2(size, memblock_size)); + if (paranoid) verify_memory(MemTracker::malloc_base(ptr)); if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr); breakpoint(); } - free(memblock); + os::free(memblock); } return ptr; #endif @@ -778,34 +697,22 @@ if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock); breakpoint(); } - verify_block(memblock); + void* membase = MemTracker::record_free(memblock); + verify_memory(membase); NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); - // Added by detlefs. 
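[Note on the os.cpp rewrite above: the hand-rolled MallocCushion bookkeeping is replaced by GuardedMemory plus an NMT header placed between the guard and the user data. A rough, self-contained model of that layout; sizes, magic values, and names are illustrative, not HotSpot's.]

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Debug-build block layout after this change, roughly:
//   [guard header | NMT header | user data | tail guard]
// GuardedMemory wraps the outer pair; MemTracker::malloc_base()/record_malloc()
// translate between the user pointer and the NMT header beneath it.
struct GuardHeader { size_t inner_size; uint32_t magic; };
static const uint32_t kGuardMagic = 0xABABABAB;   // illustrative marker value

void* guarded_alloc(size_t user_size, size_t nmt_header_size) {
  size_t total = sizeof(GuardHeader) + nmt_header_size + user_size + sizeof(uint32_t);
  char* base = static_cast<char*>(::malloc(total));
  if (base == NULL) return NULL;
  GuardHeader* h = reinterpret_cast<GuardHeader*>(base);
  h->inner_size = nmt_header_size + user_size;
  h->magic = kGuardMagic;
  uint32_t tail = kGuardMagic;
  memcpy(base + total - sizeof(uint32_t), &tail, sizeof(tail));
  return base + sizeof(GuardHeader);   // pointer to [NMT header | user data]
}

bool guards_intact(void* inner_ptr) {  // cf. verify_memory() above
  char* base = static_cast<char*>(inner_ptr) - sizeof(GuardHeader);
  GuardHeader* h = reinterpret_cast<GuardHeader*>(base);
  if (h->magic != kGuardMagic) return false;
  uint32_t tail;
  memcpy(&tail, static_cast<char*>(inner_ptr) + h->inner_size, sizeof(tail));
  return tail == kGuardMagic;
}

[os::free() reverses the translation: MemTracker::record_free() peels the NMT header, GuardedMemory verifies and releases the guards, and only then is the raw base handed to ::free().]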
- if (MallocCushion) { - u_char* ptr = (u_char*)memblock - space_before; - for (u_char* p = ptr; p < ptr + MallocCushion; p++) { - guarantee(*p == badResourceValue, - "Thing freed should be malloc result."); - *p = (u_char)freeBlockPad; - } - size_t size = get_size(memblock); - inc_stat_counter(&free_bytes, size); - u_char* end = ptr + space_before + size; - for (u_char* q = end; q < end + MallocCushion; q++) { - guarantee(*q == badResourceValue, - "Thing freed should be malloc result."); - *q = (u_char)freeBlockPad; - } - if (PrintMalloc && tty != NULL) - fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock); - } else if (PrintMalloc && tty != NULL) { - // tty->print_cr("os::free %p", memblock); - fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock); + + GuardedMemory guarded(membase); + size_t size = guarded.get_user_size(); + inc_stat_counter(&free_bytes, size); + membase = guarded.release_for_freeing(); + if (PrintMalloc && tty != NULL) { + fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase); } + ::free(membase); +#else + void* membase = MemTracker::record_free(memblock); + ::free(membase); #endif - MemTracker::record_free((address)memblock, memflags); - - ::free((char*)memblock - space_before); } void os::init_random(long initval) { @@ -1519,7 +1426,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { char* result = pd_reserve_memory(bytes, addr, alignment_hint); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); } return result; @@ -1529,7 +1436,7 @@ MEMFLAGS flags) { char* result = pd_reserve_memory(bytes, addr, alignment_hint); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); MemTracker::record_virtual_memory_type((address)result, flags); } @@ -1539,7 +1446,7 @@ char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { char* result = pd_attempt_reserve_memory_at(bytes, addr); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); } return result; } @@ -1579,23 +1486,29 @@ } bool os::uncommit_memory(char* addr, size_t bytes) { - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); - bool res = pd_uncommit_memory(addr, bytes); - if (res) { - tkr.record((address)addr, bytes); + bool res; + if (MemTracker::tracking_level() > NMT_minimal) { + Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); + res = pd_uncommit_memory(addr, bytes); + if (res) { + tkr.record((address)addr, bytes); + } } else { - tkr.discard(); + res = pd_uncommit_memory(addr, bytes); } return res; } bool os::release_memory(char* addr, size_t bytes) { - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - bool res = pd_release_memory(addr, bytes); - if (res) { - tkr.record((address)addr, bytes); + bool res; + if (MemTracker::tracking_level() > NMT_minimal) { + Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + res = pd_release_memory(addr, bytes); + if (res) { + tkr.record((address)addr, bytes); + } } else { - tkr.discard(); + res = pd_release_memory(addr, bytes); } return res; } @@ -1606,7 +1519,7 @@ 
bool allow_exec) { char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); if (result != NULL) { - MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); } return result; } @@ -1619,12 +1532,15 @@ } bool os::unmap_memory(char *addr, size_t bytes) { - MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - bool result = pd_unmap_memory(addr, bytes); - if (result) { - tkr.record((address)addr, bytes); + bool result; + if (MemTracker::tracking_level() > NMT_minimal) { + Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + result = pd_unmap_memory(addr, bytes); + if (result) { + tkr.record((address)addr, bytes); + } } else { - tkr.discard(); + result = pd_unmap_memory(addr, bytes); } return result; } --- ./hotspot/src/share/vm/runtime/os.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/os.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -66,6 +66,8 @@ class Event; class DLL; class FileHandle; +class NativeCallStack; + template class GrowableArray; // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose @@ -97,9 +99,11 @@ // Typedef for structured exception handling support typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); +class MallocTracker; + class os: AllStatic { friend class VMStructs; - + friend class MallocTracker; public: enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) @@ -161,7 +165,10 @@ // Override me as needed static int file_name_strcmp(const char* s1, const char* s2); + // get/unset environment variable static bool getenv(const char* name, char* buffer, int len); + static bool unsetenv(const char* name); + static bool have_special_privileges(); static jlong javaTimeMillis(); @@ -207,8 +214,14 @@ // Interface for detecting multiprocessor system static inline bool is_MP() { - assert(_processor_count > 0, "invalid processor count"); - return _processor_count > 1 || AssumeMP; + // During bootstrap if _processor_count is not yet initialized + // we claim to be MP as that is safest. If any platform has a + // stub generator that might be triggered in this phase and for + // which being declared MP when in fact not, is a problem - then + // the bootstrap routine for the stub generator needs to check + // the processor count directly and leave the bootstrap routine + // in place until called after initialization has ocurred. + return (_processor_count != 1) || AssumeMP; } static julong available_memory(); static julong physical_memory(); @@ -651,12 +664,20 @@ static void* thread_local_storage_at(int index); static void free_thread_local_storage(int index); - // Stack walk - static address get_caller_pc(int n = 0); + // Retrieve native stack frames. + // Parameter: + // stack: an array to storage stack pointers. + // frames: size of above array. + // toSkip: number of stack frames to skip at the beginning. + // Return: number of stack frames captured. 
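[Note on the os.hpp is_MP() change above: the assert on _processor_count is dropped, and an uninitialized count is treated as multiprocessor, since claiming MP during bootstrap is the conservative choice for memory-ordering code. Reduced to its essentials; the globals are stand-ins for the real VM state.]

// Before _processor_count is initialized (0 here, set during os::init in the
// real VM), report MP; only a known count of exactly 1 reports uniprocessor.
// AssumeMP (a product flag) forces the MP answer regardless.
static int  _processor_count = 0;
static bool AssumeMP = false;

static bool is_MP() {
  return (_processor_count != 1) || AssumeMP;
}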
+ static int get_native_stack(address* stack, int size, int toSkip = 0); // General allocation (must be MT-safe) - static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0); - static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0); + static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack); + static void* malloc (size_t size, MEMFLAGS flags); + static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack); + static void* realloc (void *memblock, size_t size, MEMFLAGS flag); + static void free (void *memblock, MEMFLAGS flags = mtNone); static bool check_heap(bool force = false); // verify C heap integrity static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup --- ./hotspot/src/share/vm/runtime/perfMemory.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/perfMemory.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "runtime/java.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/perfMemory.hpp" --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/prefetch.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP +#define SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP + +#include "runtime/prefetch.hpp" + +// Linux +#ifdef TARGET_OS_ARCH_linux_x86 +# include "prefetch_linux_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_sparc +# include "prefetch_linux_sparc.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_zero +# include "prefetch_linux_zero.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_arm +# include "prefetch_linux_arm.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_linux_ppc +# include "prefetch_linux_ppc.inline.hpp" +#endif + +// Solaris +#ifdef TARGET_OS_ARCH_solaris_x86 +# include "prefetch_solaris_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_solaris_sparc +# include "prefetch_solaris_sparc.inline.hpp" +#endif + +// Windows +#ifdef TARGET_OS_ARCH_windows_x86 +# include "prefetch_windows_x86.inline.hpp" +#endif + +// AIX +#ifdef TARGET_OS_ARCH_aix_ppc +# include "prefetch_aix_ppc.inline.hpp" +#endif + +// BSD +#ifdef TARGET_OS_ARCH_bsd_x86 +# include "prefetch_bsd_x86.inline.hpp" +#endif +#ifdef TARGET_OS_ARCH_bsd_zero +# include "prefetch_bsd_zero.inline.hpp" +#endif + +#endif // SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP --- ./hotspot/src/share/vm/runtime/safepoint.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/safepoint.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -41,6 +41,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/safepoint.hpp" #include "runtime/signature.hpp" @@ -49,7 +50,6 @@ #include "runtime/sweeper.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" -#include "services/memTracker.hpp" #include "services/runtimeService.hpp" #include "utilities/events.hpp" #include "utilities/macros.hpp" @@ -75,7 +75,7 @@ #endif #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" -#include "gc_implementation/shared/concurrentGCThread.hpp" +#include "gc_implementation/shared/suspendibleThreadSet.hpp" #endif // INCLUDE_ALL_GCS #ifdef COMPILER1 #include "c1/c1_globals.hpp" @@ -112,7 +112,7 @@ // more-general mechanism below. DLD (01/05). 
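[Note: prefetch.inline.hpp follows the same shape as the new orderAccess.inline.hpp — a single dispatch header selects the platform implementation, so call sites such as javaFrameAnchor.hpp no longer carry the #ifdef ladder themselves. The skeleton of the pattern, with placeholder names and the includes commented out so the sketch stands alone:]

#ifndef SHARE_VM_RUNTIME_FOO_INLINE_HPP    // hypothetical header
#define SHARE_VM_RUNTIME_FOO_INLINE_HPP

// #include "runtime/foo.hpp"              // platform-independent declarations

#ifdef TARGET_OS_ARCH_linux_x86
// # include "foo_linux_x86.inline.hpp"    // inline bodies for linux/x86
#endif
// ... one #ifdef block per supported TARGET_OS_ARCH ...

#endif // SHARE_VM_RUNTIME_FOO_INLINE_HPP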
ConcurrentMarkSweepThread::synchronize(false); } else if (UseG1GC) { - ConcurrentGCThread::safepoint_synchronize(); + SuspendibleThreadSet::synchronize(); } #endif // INCLUDE_ALL_GCS @@ -488,7 +488,7 @@ if (UseConcMarkSweepGC) { ConcurrentMarkSweepThread::desynchronize(false); } else if (UseG1GC) { - ConcurrentGCThread::safepoint_desynchronize(); + SuspendibleThreadSet::desynchronize(); } #endif // INCLUDE_ALL_GCS // record this time so VMThread can keep track how much time has elasped @@ -546,10 +546,6 @@ TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime); ClassLoaderDataGraph::purge_if_needed(); } - - if (MemTracker::is_on()) { - MemTracker::sync(); - } } --- ./hotspot/src/share/vm/runtime/serviceThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/serviceThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "runtime/serviceThread.hpp" #include "runtime/mutexLocker.hpp" #include "prims/jvmtiImpl.hpp" +#include "services/allocationContextService.hpp" #include "services/gcNotifier.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" @@ -86,6 +87,7 @@ bool has_jvmti_events = false; bool has_gc_notification_event = false; bool has_dcmd_notification_event = false; + bool acs_notify = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -102,7 +104,8 @@ while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && !(has_gc_notification_event = GCNotifier::has_event()) && - !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) && + !(acs_notify = AllocationContextService::should_notify())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -128,6 +131,10 @@ if(has_dcmd_notification_event) { DCmdFactory::send_notification(CHECK); } + + if (acs_notify) { + AllocationContextService::notify(CHECK); + } } } --- ./hotspot/src/share/vm/runtime/sharedRuntime.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -482,6 +482,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address)); + assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?"); // Reset method handle flag. thread->set_is_method_handle_return(false); @@ -1209,10 +1210,7 @@ (!is_virtual && invoke_code == Bytecodes::_invokedynamic) || ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode"); - // We do not patch the call site if the caller nmethod has been made non-entrant. - if (!caller_nm->is_in_use()) { - return callee_method; - } + assert(caller_nm->is_alive(), "It should be alive"); #ifndef PRODUCT // tracing/debugging/statistics @@ -1282,13 +1280,11 @@ // Now that we are ready to patch if the Method* was redefined then // don't update call site and let the caller retry. - // Don't update call site if caller nmethod has been made non-entrant - // as it is a waste of time. // Don't update call site if callee nmethod was unloaded or deoptimized. 
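[Note on the serviceThread.cpp change earlier in this group: AllocationContextService becomes one more event source the service thread sleeps on. The general shape of that wait loop, modeled with standard C++; the probe registration is illustrative — the real loop tests each source explicitly under Service_lock.]

#include <condition_variable>
#include <functional>
#include <mutex>
#include <vector>

// The service thread waits until any registered source (low-memory sensors,
// JVMTI events, GC/DCmd notifications, and now allocation-context changes)
// reports pending work, then dispatches it outside the lock.
struct ServiceLoopSketch {
  std::mutex _m;
  std::condition_variable _cv;
  std::vector<std::function<bool()>> _sources;   // "has pending work?" probes

  void add_source(std::function<bool()> probe) { _sources.push_back(probe); }

  void wait_for_work() {
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [&] {
      for (auto& probe : _sources)
        if (probe()) return true;                // some source has work
      return false;
    });
  }
  void notify_work() {                           // called when a source changes state
    std::lock_guard<std::mutex> l(_m);
    _cv.notify_all();
  }
};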
// Don't update call site if callee nmethod was replaced by an other nmethod // which may happen when multiply alive nmethod (tiered compilation) // will be supported. - if (!callee_method->is_old() && caller_nm->is_in_use() && + if (!callee_method->is_old() && (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) { #ifdef ASSERT // We must not try to patch to jump to an already unloaded method. @@ -1489,14 +1485,12 @@ // out of scope. JvmtiDynamicCodeEventCollector event_collector; - // Update inline cache to megamorphic. Skip update if caller has been - // made non-entrant or we are called from interpreted. + // Update inline cache to megamorphic. Skip update if we are called from interpreted. { MutexLocker ml_patch (CompiledIC_lock); RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(®_map); CodeBlob* cb = caller_frame.cb(); - if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) { - // Not a non-entrant nmethod, so find inline_cache + if (cb->is_nmethod()) { CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc()); bool should_be_mono = false; if (inline_cache->is_optimized()) { @@ -1639,19 +1633,13 @@ // resolve is only done once. MutexLocker ml(CompiledIC_lock); - // - // We do not patch the call site if the nmethod has been made non-entrant - // as it is a waste of time - // - if (caller_nm->is_in_use()) { - if (is_static_call) { - CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); - ssc->set_to_clean(); - } else { - // compiled, dispatched call (which used to call an interpreted method) - CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr); - inline_cache->set_to_clean(); - } + if (is_static_call) { + CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); + ssc->set_to_clean(); + } else { + // compiled, dispatched call (which used to call an interpreted method) + CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr); + inline_cache->set_to_clean(); } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/sharedRuntimeMath.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP +#define SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP + +#include + +// Used to access the lower/higher 32 bits of a double +typedef union { + double d; + struct { +#ifdef VM_LITTLE_ENDIAN + int lo; + int hi; +#else + int hi; + int lo; +#endif + } split; +} DoubleIntConv; + +static inline int high(double d) { + DoubleIntConv x; + x.d = d; + return x.split.hi; +} + +static inline int low(double d) { + DoubleIntConv x; + x.d = d; + return x.split.lo; +} + +static inline void set_high(double* d, int high) { + DoubleIntConv conv; + conv.d = *d; + conv.split.hi = high; + *d = conv.d; +} + +static inline void set_low(double* d, int low) { + DoubleIntConv conv; + conv.d = *d; + conv.split.lo = low; + *d = conv.d; +} + +static double copysignA(double x, double y) { + DoubleIntConv convX; + convX.d = x; + convX.split.hi = (convX.split.hi & 0x7fffffff) | (high(y) & 0x80000000); + return convX.d; +} + +/* + * ==================================================== + * Copyright (c) 1998 Oracle and/or its affiliates. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/* + * scalbn (double x, int n) + * scalbn(x,n) returns x* 2**n computed by exponent + * manipulation rather than by actually performing an + * exponentiation or a multiplication. + */ + +static const double +two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */ +twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */ +hugeX = 1.0e+300, +tiny = 1.0e-300; + +static double scalbnA(double x, int n) { + int k,hx,lx; + hx = high(x); + lx = low(x); + k = (hx&0x7ff00000)>>20; /* extract exponent */ + if (k==0) { /* 0 or subnormal x */ + if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */ + x *= two54; + hx = high(x); + k = ((hx&0x7ff00000)>>20) - 54; + if (n< -50000) return tiny*x; /*underflow*/ + } + if (k==0x7ff) return x+x; /* NaN or Inf */ + k = k+n; + if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow */ + if (k > 0) { /* normal result */ + set_high(&x, (hx&0x800fffff)|(k<<20)); + return x; + } + if (k <= -54) { + if (n > 50000) /* in case integer overflow in n+k */ + return hugeX*copysignA(hugeX,x); /*overflow*/ + else return tiny*copysignA(tiny,x); /*underflow*/ + } + k += 54; /* subnormal result */ + set_high(&x, (hx&0x800fffff)|(k<<20)); + return x*twom54; +} + +#endif // SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP --- ./hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,81 +40,11 @@ // generated; can not figure out how to turn down optimization for one // file in the IDE on Windows #ifdef WIN32 +# pragma warning( disable: 4748 ) // /GS can not protect parameters and local variables from local buffer overrun because optimizations are disabled in function # pragma optimize ( "", off ) #endif -#include - -// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles -// [jk] this is not 100% correct because the float word order may different -// from the byte order (e.g. on ARM) -#ifdef VM_LITTLE_ENDIAN -# define __HI(x) *(1+(int*)&x) -# define __LO(x) *(int*)&x -#else -# define __HI(x) *(int*)&x -# define __LO(x) *(1+(int*)&x) -#endif - -#if !defined(AIX) -double copysign(double x, double y) { - __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000); - return x; -} -#endif - -/* - * ==================================================== - * Copyright (c) 1998 Oracle and/or its affiliates. All rights reserved. - * - * Developed at SunSoft, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ - -/* - * scalbn (double x, int n) - * scalbn(x,n) returns x* 2**n computed by exponent - * manipulation rather than by actually performing an - * exponentiation or a multiplication. - */ - -static const double -two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */ - twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */ - hugeX = 1.0e+300, - tiny = 1.0e-300; - -#if !defined(AIX) -double scalbn (double x, int n) { - int k,hx,lx; - hx = __HI(x); - lx = __LO(x); - k = (hx&0x7ff00000)>>20; /* extract exponent */ - if (k==0) { /* 0 or subnormal x */ - if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */ - x *= two54; - hx = __HI(x); - k = ((hx&0x7ff00000)>>20) - 54; - if (n< -50000) return tiny*x; /*underflow*/ - } - if (k==0x7ff) return x+x; /* NaN or Inf */ - k = k+n; - if (k > 0x7fe) return hugeX*copysign(hugeX,x); /* overflow */ - if (k > 0) /* normal result */ - {__HI(x) = (hx&0x800fffff)|(k<<20); return x;} - if (k <= -54) { - if (n > 50000) /* in case integer overflow in n+k */ - return hugeX*copysign(hugeX,x); /*overflow*/ - else return tiny*copysign(tiny,x); /*underflow*/ - } - k += 54; /* subnormal result */ - __HI(x) = (hx&0x800fffff)|(k<<20); - return x*twom54; -} -#endif +#include "runtime/sharedRuntimeMath.hpp" /* __ieee754_log(x) * Return the logrithm of x @@ -185,8 +115,8 @@ int k,hx,i,j; unsigned lx; - hx = __HI(x); /* high word of x */ - lx = __LO(x); /* low word of x */ + hx = high(x); /* high word of x */ + lx = low(x); /* low word of x */ k=0; if (hx < 0x00100000) { /* x < 2**-1022 */ @@ -194,13 +124,13 @@ return -two54/zero; /* log(+-0)=-inf */ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */ k -= 54; x *= two54; /* subnormal number, scale up x */ - hx = __HI(x); /* high word of x */ + hx = high(x); /* high word of x */ } if (hx >= 0x7ff00000) return x+x; k += (hx>>20)-1023; hx &= 0x000fffff; i = (hx+0x95f64)&0x100000; - __HI(x) = hx|(i^0x3ff00000); /* normalize x or x/2 */ + set_high(&x, hx|(i^0x3ff00000)); /* normalize x or x/2 */ k += (i>>20); f = x-1.0; if((0x000fffff&(2+hx))<3) { /* |f| < 2**-20 */ @@ -279,8 +209,8 @@ int i,k,hx; unsigned lx; - hx = __HI(x); /* high word of x */ - lx = __LO(x); /* low word of x */ + hx = high(x); /* high word of x */ + lx = low(x); /* low word of x 
*/ k=0; if (hx < 0x00100000) { /* x < 2**-1022 */ @@ -288,14 +218,14 @@ return -two54/zero; /* log(+-0)=-inf */ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */ k -= 54; x *= two54; /* subnormal number, scale up x */ - hx = __HI(x); /* high word of x */ + hx = high(x); /* high word of x */ } if (hx >= 0x7ff00000) return x+x; k += (hx>>20)-1023; i = ((unsigned)k&0x80000000)>>31; hx = (hx&0x000fffff)|((0x3ff-i)<<20); y = (double)(k+i); - __HI(x) = hx; + set_high(&x, hx); z = y*log10_2lo + ivln10*__ieee754_log(x); return z+y*log10_2hi; } @@ -390,14 +320,14 @@ int k=0,xsb; unsigned hx; - hx = __HI(x); /* high word of x */ + hx = high(x); /* high word of x */ xsb = (hx>>31)&1; /* sign bit of x */ hx &= 0x7fffffff; /* high word of |x| */ /* filter out non-finite argument */ if(hx >= 0x40862E42) { /* if |x|>=709.78... */ if(hx>=0x7ff00000) { - if(((hx&0xfffff)|__LO(x))!=0) + if(((hx&0xfffff)|low(x))!=0) return x+x; /* NaN */ else return (xsb==0)? x:0.0; /* exp(+-inf)={inf,0} */ } @@ -428,10 +358,10 @@ if(k==0) return one-((x*c)/(c-2.0)-x); else y = one-((lo-(x*c)/(2.0-c))-hi); if(k >= -1021) { - __HI(y) += (k<<20); /* add k to y's exponent */ + set_high(&y, high(y) + (k<<20)); /* add k to y's exponent */ return y; } else { - __HI(y) += ((k+1000)<<20);/* add k to y's exponent */ + set_high(&y, high(y) + ((k+1000)<<20)); /* add k to y's exponent */ return y*twom1000; } } @@ -518,8 +448,8 @@ unsigned lx,ly; i0 = ((*(int*)&one)>>29)^1; i1=1-i0; - hx = __HI(x); lx = __LO(x); - hy = __HI(y); ly = __LO(y); + hx = high(x); lx = low(x); + hy = high(y); ly = low(y); ix = hx&0x7fffffff; iy = hy&0x7fffffff; /* y==zero: x**0 = 1 */ @@ -619,14 +549,14 @@ u = ivln2_h*t; /* ivln2_h has 21 sig. bits */ v = t*ivln2_l-w*ivln2; t1 = u+v; - __LO(t1) = 0; + set_low(&t1, 0); t2 = v-(t1-u); } else { double ss,s2,s_h,s_l,t_h,t_l; n = 0; /* take care subnormal number */ if(ix<0x00100000) - {ax *= two53; n -= 53; ix = __HI(ax); } + {ax *= two53; n -= 53; ix = high(ax); } n += ((ix)>>20)-0x3ff; j = ix&0x000fffff; /* determine interval */ @@ -634,17 +564,17 @@ if(j<=0x3988E) k=0; /* |x|>1)|0x20000000)+0x00080000+(k<<18); + set_high(&t_h, ((ix>>1)|0x20000000)+0x00080000+(k<<18)); t_l = ax - (t_h-bp[k]); s_l = v*((u-s_h*t_h)-s_h*t_l); /* compute log(ax) */ @@ -653,32 +583,32 @@ r += s_l*(s_h+ss); s2 = s_h*s_h; t_h = 3.0+s2+r; - __LO(t_h) = 0; + set_low(&t_h, 0); t_l = r-((t_h-3.0)-s2); /* u+v = ss*(1+...) */ u = s_h*t_h; v = s_l*t_h+t_l*ss; /* 2/(3log2)*(ss+...) 
*/ p_h = u+v; - __LO(p_h) = 0; + set_low(&p_h, 0); p_l = v-(p_h-u); z_h = cp_h*p_h; /* cp_h+cp_l = 2/(3*log2) */ z_l = cp_l*p_h+p_l*cp+dp_l[k]; /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */ t = (double)n; t1 = (((z_h+z_l)+dp_h[k])+t); - __LO(t1) = 0; + set_low(&t1, 0); t2 = z_l-(((t1-t)-dp_h[k])-z_h); } /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ y1 = y; - __LO(y1) = 0; + set_low(&y1, 0); p_l = (y-y1)*t1+y*t2; p_h = y1*t1; z = p_l+p_h; - j = __HI(z); - i = __LO(z); + j = high(z); + i = low(z); if (j>=0x40900000) { /* z >= 1024 */ if(((j-0x40900000)|i)!=0) /* if z > 1024 */ return s*hugeX*hugeX; /* overflow */ @@ -702,13 +632,13 @@ n = j+(0x00100000>>(k+1)); k = ((n&0x7fffffff)>>20)-0x3ff; /* new k for n */ t = zeroX; - __HI(t) = (n&~(0x000fffff>>k)); + set_high(&t, (n&~(0x000fffff>>k))); n = ((n&0x000fffff)|0x00100000)>>(20-k); if(j<0) n = -n; p_h -= t; } t = p_l+p_h; - __LO(t) = 0; + set_low(&t, 0); u = t*lg2_h; v = (p_l-(t-p_h))*lg2+t*lg2_l; z = u+v; @@ -717,10 +647,10 @@ t1 = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5)))); r = (z*t1)/(t1-two)-(w+z*w); z = one-(r-z); - j = __HI(z); + j = high(z); j += (n<<20); - if((j>>20)<=0) z = scalbn(z,n); /* subnormal output */ - else __HI(z) += (n<<20); + if((j>>20)<=0) z = scalbnA(z,n); /* subnormal output */ + else set_high(&z, high(z) + (n<<20)); return s*z; } --- ./hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,63 +63,7 @@ #define SAFEBUF #endif -#include - -// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles -// [jk] this is not 100% correct because the float word order may different -// from the byte order (e.g. on ARM) -#ifdef VM_LITTLE_ENDIAN -# define __HI(x) *(1+(int*)&x) -# define __LO(x) *(int*)&x -#else -# define __HI(x) *(int*)&x -# define __LO(x) *(1+(int*)&x) -#endif - -static double copysignA(double x, double y) { - __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000); - return x; -} - -/* - * scalbn (double x, int n) - * scalbn(x,n) returns x* 2**n computed by exponent - * manipulation rather than by actually performing an - * exponentiation or a multiplication. 
- */ - -static const double -two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */ -twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */ -hugeX = 1.0e+300, -tiny = 1.0e-300; - -static double scalbnA (double x, int n) { - int k,hx,lx; - hx = __HI(x); - lx = __LO(x); - k = (hx&0x7ff00000)>>20; /* extract exponent */ - if (k==0) { /* 0 or subnormal x */ - if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */ - x *= two54; - hx = __HI(x); - k = ((hx&0x7ff00000)>>20) - 54; - if (n< -50000) return tiny*x; /*underflow*/ - } - if (k==0x7ff) return x+x; /* NaN or Inf */ - k = k+n; - if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow */ - if (k > 0) /* normal result */ - {__HI(x) = (hx&0x800fffff)|(k<<20); return x;} - if (k <= -54) { - if (n > 50000) /* in case integer overflow in n+k */ - return hugeX*copysignA(hugeX,x); /*overflow*/ - else return tiny*copysignA(tiny,x); /*underflow*/ - } - k += 54; /* subnormal result */ - __HI(x) = (hx&0x800fffff)|(k<<20); - return x*twom54; -} +#include "runtime/sharedRuntimeMath.hpp" /* * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2) @@ -603,7 +547,7 @@ { double z,r,v; int ix; - ix = __HI(x)&0x7fffffff; /* high word of x */ + ix = high(x)&0x7fffffff; /* high word of x */ if(ix<0x3e400000) /* |x| < 2**-27 */ {if((int)x==0) return x;} /* generate inexact */ z = x*x; @@ -658,9 +602,9 @@ static double __kernel_cos(double x, double y) { - double a,h,z,r,qx; + double a,h,z,r,qx=0; int ix; - ix = __HI(x)&0x7fffffff; /* ix = |x|'s high word*/ + ix = high(x)&0x7fffffff; /* ix = |x|'s high word*/ if(ix<0x3e400000) { /* if x < 2**27 */ if(((int)x)==0) return one; /* generate inexact */ } @@ -672,8 +616,8 @@ if(ix > 0x3fe90000) { /* x > 0.78125 */ qx = 0.28125; } else { - __HI(qx) = ix-0x00200000; /* x/4 */ - __LO(qx) = 0; + set_high(&qx, ix-0x00200000); /* x/4 */ + set_low(&qx, 0); } h = 0.5*z-qx; a = one-qx; @@ -738,11 +682,11 @@ { double z,r,v,w,s; int ix,hx; - hx = __HI(x); /* high word of x */ + hx = high(x); /* high word of x */ ix = hx&0x7fffffff; /* high word of |x| */ if(ix<0x3e300000) { /* x < 2**-28 */ if((int)x==0) { /* generate inexact */ - if (((ix | __LO(x)) | (iy + 1)) == 0) + if (((ix | low(x)) | (iy + 1)) == 0) return one / fabsd(x); else { if (iy == 1) @@ -751,10 +695,10 @@ double a, t; z = w = x + y; - __LO(z) = 0; + set_low(&z, 0); v = y - (z - x); t = a = -one / w; - __LO(t) = 0; + set_low(&t, 0); s = one + t * z; return t + a * (s + t * v); } @@ -789,10 +733,10 @@ /* compute -1.0/(x+r) accurately */ double a,t; z = w; - __LO(z) = 0; + set_low(&z, 0); v = r-(z - x); /* z+v = r+x */ t = a = -1.0/w; /* a = -1.0/w */ - __LO(t) = 0; + set_low(&t, 0); s = 1.0+t*z; return t+a*(s+t*v); } @@ -841,7 +785,7 @@ int n, ix; /* High word of x. */ - ix = __HI(x); + ix = high(x); /* |x| ~< pi/4 */ ix &= 0x7fffffff; @@ -899,7 +843,7 @@ int n, ix; /* High word of x. */ - ix = __HI(x); + ix = high(x); /* |x| ~< pi/4 */ ix &= 0x7fffffff; @@ -956,7 +900,7 @@ int n, ix; /* High word of x. 
*/ - ix = __HI(x); + ix = high(x); /* |x| ~< pi/4 */ ix &= 0x7fffffff; --- ./hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -196,7 +196,6 @@ // Don't trigger other compiles in testing mode return NULL; } - nmethod *osr_nm = NULL; handle_counter_overflow(method()); if (method() != inlinee()) { @@ -210,14 +209,16 @@ if (bci == InvocationEntryBci) { method_invocation_event(method, inlinee, comp_level, nm, thread); } else { + // method == inlinee if the event originated in the main method method_back_branch_event(method, inlinee, bci, comp_level, nm, thread); - // method == inlinee if the event originated in the main method - int highest_level = inlinee->highest_osr_comp_level(); - if (highest_level > comp_level) { - osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false); + // Check if event led to a higher level OSR compilation + nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false); + if (osr_nm != NULL && osr_nm->comp_level() > comp_level) { + // Perform OSR with new nmethod + return osr_nm; } } - return osr_nm; + return NULL; } // Check if the method can be compiled, change level if necessary @@ -239,7 +240,7 @@ if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) { return; } - if (!CompileBroker::compilation_is_in_queue(mh, bci)) { + if (!CompileBroker::compilation_is_in_queue(mh)) { if (PrintTieredEvents) { print_event(COMPILE, mh, mh, bci, level); } @@ -378,7 +379,7 @@ // Handle the invocation event. void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh, CompLevel level, nmethod* nm, JavaThread* thread) { - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { CompLevel next_level = call_event(mh(), level); if (next_level != level) { compile(mh, InvocationEntryBci, next_level, thread); @@ -391,8 +392,8 @@ void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh, int bci, CompLevel level, nmethod* nm, JavaThread* thread) { // If the method is already compiling, quickly bail out. - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { - // Use loop event as an opportinity to also check there's been + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { + // Use loop event as an opportunity to also check there's been // enough calls. CompLevel cur_level = comp_level(mh()); CompLevel next_level = call_event(mh(), cur_level); --- ./hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -54,13 +54,17 @@ // Simple methods are as good being compiled with C1 as C2. // Determine if a given method is such a case. 
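[Note on the trig/transcendental hunks above: the code is ported from the __HI/__LO macros, which type-punned through pointers, to the union-based high()/low()/set_high()/set_low() helpers now shared via sharedRuntimeMath.hpp. A compilable miniature; this sketch hardcodes little-endian word order (e.g. x86), whereas the real header switches the field order on VM_LITTLE_ENDIAN.]

#include <cstdio>

// Union-based word accessors, replacing the old *(int*)&x punning.
typedef union {
  double d;
  struct { int lo; int hi; } split;   // little-endian: low word first
} DoubleIntConv;

static int high(double d) { DoubleIntConv x; x.d = d; return x.split.hi; }
static int low (double d) { DoubleIntConv x; x.d = d; return x.split.lo; }

int main() {
  // 1.0 is 0x3FF0000000000000, so this prints "3ff00000 00000000".
  printf("%08x %08x\n", high(1.0), low(1.0));
  return 0;
}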
bool SimpleThresholdPolicy::is_trivial(Method* method) { - if (method->is_accessor()) return true; - if (method->code() != NULL) { - MethodData* mdo = method->method_data(); - if (mdo != NULL && mdo->num_loops() == 0 && - (method->code_size() < 5 || (mdo->num_blocks() < 4) && (method->code_size() < 15))) { - return !mdo->would_profile(); - } + if (method->is_accessor() || + method->is_constant_getter()) { + return true; + } + if (method->has_loops() || method->code_size() >= 15) { + return false; + } + MethodData* mdo = method->method_data(); + if (mdo != NULL && !mdo->would_profile() && + (method->code_size() < 5 || (mdo->num_blocks() < 4))) { + return true; } return false; } --- ./hotspot/src/share/vm/runtime/stubRoutines.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/stubRoutines.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,9 +125,18 @@ address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL; address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL; +address StubRoutines::_sha1_implCompress = NULL; +address StubRoutines::_sha1_implCompressMB = NULL; +address StubRoutines::_sha256_implCompress = NULL; +address StubRoutines::_sha256_implCompressMB = NULL; +address StubRoutines::_sha512_implCompress = NULL; +address StubRoutines::_sha512_implCompressMB = NULL; + address StubRoutines::_updateBytesCRC32 = NULL; address StubRoutines::_crc_table_adr = NULL; +address StubRoutines::_multiplyToLen = NULL; + double (* StubRoutines::_intrinsic_log )(double) = NULL; double (* StubRoutines::_intrinsic_log10 )(double) = NULL; double (* StubRoutines::_intrinsic_exp )(double) = NULL; --- ./hotspot/src/share/vm/runtime/stubRoutines.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/stubRoutines.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -207,9 +207,18 @@ static address _cipherBlockChaining_encryptAESCrypt; static address _cipherBlockChaining_decryptAESCrypt; + static address _sha1_implCompress; + static address _sha1_implCompressMB; + static address _sha256_implCompress; + static address _sha256_implCompressMB; + static address _sha512_implCompress; + static address _sha512_implCompressMB; + static address _updateBytesCRC32; static address _crc_table_adr; + static address _multiplyToLen; + // These are versions of the java.lang.Math methods which perform // the same operations as the intrinsic version. They are used for // constant folding in the compiler to ensure equivalence. 
If the @@ -356,9 +365,18 @@ static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; } static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; } + static address sha1_implCompress() { return _sha1_implCompress; } + static address sha1_implCompressMB() { return _sha1_implCompressMB; } + static address sha256_implCompress() { return _sha256_implCompress; } + static address sha256_implCompressMB() { return _sha256_implCompressMB; } + static address sha512_implCompress() { return _sha512_implCompress; } + static address sha512_implCompressMB() { return _sha512_implCompressMB; } + static address updateBytesCRC32() { return _updateBytesCRC32; } static address crc_table_addr() { return _crc_table_adr; } + static address multiplyToLen() {return _multiplyToLen; } + static address select_fill_function(BasicType t, bool aligned, const char* &name); static address zero_aligned_words() { return _zero_aligned_words; } --- ./hotspot/src/share/vm/runtime/sweeper.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/sweeper.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,8 +33,10 @@ #include "runtime/atomic.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/os.hpp" #include "runtime/sweeper.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vm_operations.hpp" #include "trace/tracing.hpp" #include "utilities/events.hpp" --- ./hotspot/src/share/vm/runtime/thread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/thread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -58,6 +58,7 @@ #include "runtime/memprofiler.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/safepoint.hpp" #include "runtime/sharedRuntime.hpp" @@ -233,6 +234,8 @@ // This initial value ==> never claimed. 
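The new StubRoutines slots above (the SHA compress entry points and _multiplyToLen) follow the file's existing pattern: each intrinsic entry point is a NULL-initialized static address that a platform stub generator may fill in, and callers must test it before dispatching. A minimal sketch of that pattern with illustrative names (sha1_impl_compress and its signature are stand-ins, not the real hook-up):

#include <cstddef>

// Stand-in for a StubRoutines-style intrinsic entry point: NULL means the
// platform did not generate an accelerated stub, so take the portable path.
typedef void (*sha1_stub_t)(unsigned char* block, unsigned int* state);
static sha1_stub_t sha1_impl_compress = NULL;  // installed by platform init, if any

void sha1_compress_block(unsigned char* block, unsigned int* state) {
  if (sha1_impl_compress != NULL) {
    sha1_impl_compress(block, state);  // hardware-accelerated stub
  } else {
    // portable software compression would run here
  }
}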
_oops_do_parity = 0; + _metadata_on_stack_buffer = NULL; + // the handle mark links itself to last_handle_mark new HandleMark(this); @@ -330,8 +333,7 @@ #if INCLUDE_NMT // record thread's native stack, stack grows downward address stack_low_addr = stack_base() - stack_size(); - MemTracker::record_thread_stack(stack_low_addr, stack_size(), this, - CURRENT_PC); + MemTracker::record_thread_stack(stack_low_addr, stack_size()); #endif // INCLUDE_NMT } @@ -349,7 +351,7 @@ #if INCLUDE_NMT if (_stack_base != NULL) { address low_stack_addr = stack_base() - stack_size(); - MemTracker::release_thread_stack(low_stack_addr, stack_size(), this); + MemTracker::release_thread_stack(low_stack_addr, stack_size()); #ifdef ASSERT set_stack_base(NULL); #endif @@ -833,7 +835,7 @@ return false; } -void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { active_handles()->oops_do(f); // Do oop for ThreadShadow f->do_oop((oop*)&_pending_exception); @@ -860,6 +862,7 @@ st->print("os_prio=%d ", os_prio); } st->print("tid=" INTPTR_FORMAT " ", this); + ext().print_on(st); osthread()->print_on(st); } debug_only(if (WizardMode) print_owned_locks_on(st);) @@ -939,7 +942,7 @@ cur != VMOperationRequest_lock && cur != VMOperationQueue_lock) || cur->rank() == Mutex::special) { - warning("Thread holding lock at safepoint that vm can block on: %s", cur->name()); + fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name())); } } } @@ -1441,9 +1444,6 @@ set_monitor_chunks(NULL); set_next(NULL); set_thread_state(_thread_new); -#if INCLUDE_NMT - set_recorder(NULL); -#endif _terminated = _not_terminated; _privileged_stack_top = NULL; _array_for_gc = NULL; @@ -1495,6 +1495,7 @@ _popframe_condition = popframe_inactive; _popframe_preserved_args = NULL; _popframe_preserved_args_size = 0; + _frames_to_pop_failed_realloc = 0; pd_initialize(); } @@ -1518,7 +1519,6 @@ _jni_attach_state = _not_attaching_via_jni; } assert(deferred_card_mark().is_empty(), "Default MemRegion ctor"); - _safepoint_visible = false; } bool JavaThread::reguard_stack(address cur_sp) { @@ -1581,7 +1581,6 @@ thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread : os::java_thread; os::create_thread(this, thr_type, stack_sz); - _safepoint_visible = false; // The _osthread may be NULL here because we ran out of memory (too many threads active). // We need to throw and OutOfMemoryError - however we cannot do this here because the caller // may hold a lock and all locks must be unlocked before throwing the exception (throwing @@ -1599,13 +1598,6 @@ tty->print_cr("terminate thread %p", this); } - // By now, this thread should already be invisible to safepoint, - // and its per-thread recorder also collected. - assert(!is_safepoint_visible(), "wrong state"); -#if INCLUDE_NMT - assert(get_recorder() == NULL, "Already collected"); -#endif // INCLUDE_NMT - // JSR166 -- return the parker to the free list Parker::Release(_parker); _parker = NULL ; @@ -2729,7 +2721,7 @@ } }; -void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { // Verify that the deferred card marks have been flushed. 
assert(deferred_card_mark().is_empty(), "Should be empty during GC"); @@ -3033,6 +3025,8 @@ // Push the Java priority down to the native thread; needs Threads_lock Thread::set_priority(this, prio); + prepare_ext(); + // Add the new thread to the Threads list and set it in motion. // We must have threads lock in order to call Threads::add. // It is crucial that we do not block before the thread is @@ -3252,7 +3246,7 @@ #endif } -void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { JavaThread::oops_do(f, cld_f, cf); if (_scanned_nmethod != NULL && cf != NULL) { // Safepoints can occur when the sweeper is scanning an nmethod so @@ -3369,11 +3363,6 @@ // intialize TLS ThreadLocalStorage::init(); - // Bootstrap native memory tracking, so it can start recording memory - // activities before worker thread is started. This is the first phase - // of bootstrapping, VM is currently running in single-thread mode. - MemTracker::bootstrap_single_thread(); - // Initialize output stream logging ostream_init_log(); @@ -3424,9 +3413,6 @@ // Initialize Java-Level synchronization subsystem ObjectMonitor::Initialize() ; - // Second phase of bootstrapping, VM is about entering multi-thread mode - MemTracker::bootstrap_multi_thread(); - // Initialize global modules jint status = init_globals(); if (status != JNI_OK) { @@ -3448,9 +3434,6 @@ // real raw monitor. VM is setup enough here for raw monitor enter. JvmtiExport::transition_pending_onload_raw_monitors(); - // Fully start NMT - MemTracker::start(); - // Create the VMThread { TraceTime timer("Start VMThread", TraceStartupTime); VMThread::create(); @@ -3898,6 +3881,24 @@ } } +JavaThread* Threads::find_java_thread_from_java_tid(jlong java_tid) { + assert(Threads_lock->owned_by_self(), "Must hold Threads_lock"); + + JavaThread* java_thread = NULL; + // Sequential search for now. Need to do better optimization later. + for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) { + oop tobj = thread->threadObj(); + if (!thread->is_exiting() && + tobj != NULL && + java_tid == java_lang_Thread::thread_id(tobj)) { + java_thread = thread; + break; + } + } + return java_thread; +} + + // Last thread running calls java.lang.Shutdown.shutdown() void JavaThread::invoke_shutdown_hooks() { HandleMark hm(this); @@ -4088,8 +4089,6 @@ daemon = false; } - p->set_safepoint_visible(true); - ThreadService::add_thread(p, daemon); // Possible GC point. @@ -4135,13 +4134,6 @@ // to do callbacks into the safepoint code. However, the safepoint code is not aware // of this thread since it is removed from the queue. p->set_terminated_value(); - - // Now, this thread is not visible to safepoint - p->set_safepoint_visible(false); - // once the thread becomes safepoint invisible, we can not use its per-thread - // recorder. And Threads::do_threads() no longer walks this thread, so we have - // to release its per-thread recorder here. - MemTracker::thread_exiting(p); } // unlock Threads_lock // Since Events::log uses a lock, we grab it outside the Threads_lock @@ -4166,22 +4158,22 @@ // uses the Threads_lock to gurantee this property. It also makes sure that // all threads gets blocked when exiting or starting). 
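The Threads::find_java_thread_from_java_tid() helper added above is a plain linear scan over the live thread list, asserted to run under Threads_lock. A self-contained model of that contract (std::mutex and the list type stand in for Threads_lock and HotSpot's thread list):

#include <cstdint>
#include <mutex>

// Models the new helper: a linear scan over the live thread list that must
// run under the lock guarding that list (Threads_lock in the patch).
struct JavaThreadModel { int64_t java_tid; bool exiting; JavaThreadModel* next; };

static std::mutex       threads_lock;         // stands in for Threads_lock
static JavaThreadModel* first_thread = NULL;  // stands in for Threads::first()

static JavaThreadModel* find_by_tid_locked(int64_t tid) {
  // Precondition (asserted in the patch): threads_lock is held by the caller.
  for (JavaThreadModel* t = first_thread; t != NULL; t = t->next) {
    if (!t->exiting && t->java_tid == tid) return t;  // live thread, id match
  }
  return NULL;  // no live thread with this java.lang.Thread id
}

JavaThreadModel* find_by_tid(int64_t tid) {
  std::lock_guard<std::mutex> guard(threads_lock);   // take the list lock
  return find_by_tid_locked(tid);
}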
-void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { ALL_JAVA_THREADS(p) { p->oops_do(f, cld_f, cf); } VMThread::vm_thread()->oops_do(f, cld_f, cf); } -void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { // Introduce a mechanism allowing parallel threads to claim threads as // root groups. Overhead should be small enough to use all the time, // even in sequential code. SharedHeap* sh = SharedHeap::heap(); // Cannot yet substitute active_workers for n_par_threads // because of G1CollectedHeap::verify() use of - // SharedHeap::process_strong_roots(). n_par_threads == 0 will - // turn off parallelism in process_strong_roots while active_workers + // SharedHeap::process_roots(). n_par_threads == 0 will + // turn off parallelism in process_roots while active_workers // is being used for parallelism elsewhere. bool is_par = sh->n_par_threads() > 0; assert(!is_par || --- ./hotspot/src/share/vm/runtime/thread.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/thread.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -40,16 +40,12 @@ #include "runtime/safepoint.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/threadLocalStorage.hpp" +#include "runtime/thread_ext.hpp" #include "runtime/unhandledOops.hpp" -#include "utilities/macros.hpp" - -#if INCLUDE_NMT -#include "services/memRecorder.hpp" -#endif // INCLUDE_NMT - #include "trace/traceBackend.hpp" #include "trace/traceMacros.hpp" #include "utilities/exceptions.hpp" +#include "utilities/macros.hpp" #include "utilities/top.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/dirtyCardQueue.hpp" @@ -86,6 +82,10 @@ class ThreadClosure; class IdealGraphPrinter; +class Metadata; +template <class T, MEMFLAGS F> class ChunkedList; +typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer; + DEBUG_ONLY(class ResourceMark;) class WorkerThread; @@ -259,8 +259,13 @@ jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap + // Thread-local buffer used by MetadataOnStackMark. + MetadataOnStackBuffer* _metadata_on_stack_buffer; + TRACE_DATA _trace_data; // Thread-local data for tracing + ThreadExt _ext; + int _vm_operation_started_count; // VM_Operation support int _vm_operation_completed_count; // VM_Operation support @@ -436,21 +441,13 @@ jlong allocated_bytes() { return _allocated_bytes; } void set_allocated_bytes(jlong value) { _allocated_bytes = value; } void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } - jlong cooked_allocated_bytes() { - jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes); - if (UseTLAB) { - size_t used_bytes = tlab().used_bytes(); - if ((ssize_t)used_bytes > 0) { - // More-or-less valid tlab. The load_acquire above should ensure - // that the result of the add is <= the instantaneous value - return allocated_bytes + used_bytes; - } - } - return allocated_bytes; - } + inline jlong cooked_allocated_bytes(); TRACE_DATA* trace_data() { return &_trace_data; } + const ThreadExt& ext() const { return _ext; } + ThreadExt& ext() { return _ext; } + // VM operation support int vm_operation_ticket() { return ++_vm_operation_started_count; } int vm_operation_completed_count() { return _vm_operation_completed_count; } @@ -483,13 +480,13 @@ // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive. // Used by JavaThread::oops_do.
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames - virtual void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // Handles the parallel case for the method below. private: bool claim_oops_do_par_case(int collection_parity); public: - // Requires that "collection_parity" is that of the current strong roots + // Requires that "collection_parity" is that of the current roots // iteration. If "is_par" is false, sets the parity of "this" to // "collection_parity", and returns "true". If "is_par" is true, // uses an atomic instruction to set the current threads parity to @@ -526,7 +523,10 @@ // creation fails due to lack of memory, too many threads etc. bool set_as_starting_thread(); - protected: + void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; } + MetadataOnStackBuffer* metadata_on_stack_buffer() const { return _metadata_on_stack_buffer; } + +protected: // OS data associated with the thread OSThread* _osthread; // Platform-specific thread information @@ -933,6 +933,12 @@ // This is set to popframe_pending to signal that top Java frame should be popped immediately int _popframe_condition; + // If reallocation of scalar replaced objects fails, we throw OOM + // and during exception propagation, pop the top + // _frames_to_pop_failed_realloc frames, the ones that reference + // failed reallocations. + int _frames_to_pop_failed_realloc; + #ifndef PRODUCT int _jmp_ring_index; struct { @@ -1017,6 +1023,7 @@ // not specified, use the priority of the thread object. Threads_lock // must be held while this function is called. void prepare(jobject jni_thread, ThreadPriority prio=NoPriority); + void prepare_ext(); void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; } address saved_exception_pc() { return _saved_exception_pc; } @@ -1046,12 +1053,8 @@ #else // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. 
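The hunk below moves the membar-using thread-state accessors out of the class body and into thread.inline.hpp (where, as the later hunk shows, they remain guarded for PPC64). The ordering contract is a release store paired with an acquire load. A self-contained sketch of that pairing, with std::atomic standing in for OrderAccess:

#include <atomic>

// Sketch of the acquire/release pairing behind the accessors that move into
// thread.inline.hpp; the enum and names are illustrative, not HotSpot's.
enum ThreadStateModel { STATE_NEW, STATE_IN_JAVA, STATE_BLOCKED };
static std::atomic<int> thread_state_word(STATE_NEW);

inline ThreadStateModel thread_state_model() {
  // Acquire: everything the setter published before its store is now visible.
  return (ThreadStateModel)thread_state_word.load(std::memory_order_acquire);
}
inline void set_thread_state_model(ThreadStateModel s) {
  // Release: this thread's prior writes become visible before the new state.
  thread_state_word.store((int)s, std::memory_order_release);
}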
- JavaThreadState thread_state() const { - return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state); - } - void set_thread_state(JavaThreadState s) { - OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s); - } + inline JavaThreadState thread_state() const; + inline void set_thread_state(JavaThreadState s); #endif ThreadSafepointState *safepoint_state() const { return _safepoint_state; } void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; } @@ -1074,16 +1077,6 @@ bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; } void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; } -#if INCLUDE_NMT - // native memory tracking - inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; } - inline void set_recorder(MemRecorder* rc) { _recorder = rc; } - - private: - // per-thread memory recorder - MemRecorder* volatile _recorder; -#endif // INCLUDE_NMT - // Suspend/resume support for JavaThread private: void set_ext_suspended() { set_suspend_flag (_ext_suspended); } @@ -1444,7 +1437,7 @@ void frames_do(void f(frame*, const RegisterMap*)); // Memory operations - void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // Sweeper operations void nmethods_do(CodeBlobClosure* cf); @@ -1526,19 +1519,6 @@ return result; } - // NMT (Native memory tracking) support. - // This flag helps NMT to determine if this JavaThread will be blocked - // at safepoint. If not, ThreadCritical is needed for writing memory records. - // JavaThread is only safepoint visible when it is in Threads' thread list, - // it is not visible until it is added to the list and becomes invisible - // once it is removed from the list. - public: - bool is_safepoint_visible() const { return _safepoint_visible; } - void set_safepoint_visible(bool visible) { _safepoint_visible = visible; } - private: - bool _safepoint_visible; - - // Static operations public: // Returns the running thread as a JavaThread static inline JavaThread* current(); @@ -1611,6 +1591,10 @@ void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } #endif + int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; } + void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; } + void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; } + private: // Saved incoming arguments to popped frame. // Used only when popped interpreted frame returns to deoptimized frame. @@ -1775,7 +1759,7 @@ // clearing/querying jni attach status bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; } bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; } - void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); } + inline void set_done_attaching_via_jni(); private: // This field is used to determine if a thread has claimed // a par_id: it is UINT_MAX if the thread has not claimed a par_id; @@ -1875,7 +1859,7 @@ // GC support // Apply "f->do_oop" to all root oops in "this". 
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames - void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); #ifndef PRODUCT private: @@ -1942,9 +1926,9 @@ // Apply "f->do_oop" to all root oops in all threads. // This version may only be called by sequential code. - static void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // This version may be called by sequential or parallel code. - static void possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // This creates a list of GCTasks, one per thread. static void create_thread_roots_tasks(GCTaskQueue* q); // This creates a list of GCTasks, one per thread, for marking objects. @@ -1998,6 +1982,8 @@ // Deoptimizes all frames tied to marked nmethods static void deoptimized_wrt_marked_nmethods(); + static JavaThread* find_java_thread_from_java_tid(jlong java_tid); + }; --- ./hotspot/src/share/vm/runtime/thread.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/thread.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -46,4 +46,32 @@ #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE +inline jlong Thread::cooked_allocated_bytes() { + jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes); + if (UseTLAB) { + size_t used_bytes = tlab().used_bytes(); + if ((ssize_t)used_bytes > 0) { + // More-or-less valid tlab. The load_acquire above should ensure + // that the result of the add is <= the instantaneous value. + return allocated_bytes + used_bytes; + } + } + return allocated_bytes; +} + +#ifdef PPC64 +inline JavaThreadState JavaThread::thread_state() const { + return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state); +} + +inline void JavaThread::set_thread_state(JavaThreadState s) { + OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s); +} +#endif + +inline void JavaThread::set_done_attaching_via_jni() { + _jni_attach_state = _attached_via_jni; + OrderAccess::fence(); +} + #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/thread_ext.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "runtime/thread.hpp" +#include "runtime/thread_ext.hpp" + +void JavaThread::prepare_ext() { +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/runtime/thread_ext.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_THREAD_EXT_HPP +#define SHARE_VM_RUNTIME_THREAD_EXT_HPP + +#include "memory/allocation.hpp" + +class ThreadExt VALUE_OBJ_CLASS_SPEC { +public: + void print_on(outputStream* st) const {}; +}; + +#endif // SHARE_VM_RUNTIME_THREAD_EXT_HPP --- ./hotspot/src/share/vm/runtime/vframe.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vframe.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -260,65 +260,156 @@ return fr().interpreter_frame_method(); } -StackValueCollection* interpretedVFrame::locals() const { - int length = method()->max_locals(); +static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask, + int index, + const intptr_t* const addr) { - if (method()->is_native()) { - // If the method is native, max_locals is not telling the truth. - // maxlocals then equals the size of parameters - length = method()->size_of_parameters(); + assert(index >= 0 && + index < oop_mask.number_of_entries(), "invariant"); + + // categorize using oop_mask + if (oop_mask.is_oop(index)) { + // reference (oop) "r" + Handle h(addr != NULL ? (*(oop*)addr) : (oop)NULL); + return new StackValue(h); + } + // value (integer) "v" + return new StackValue(addr != NULL ? *addr : 0); +} + +static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) { + assert(addr != NULL, "invariant"); + + // Ensure the address is 'inside' the expression stack (i.e., addr >= sp for Intel). + // In case of exceptions, the expression stack is invalid and the sp + // will be reset to express this condition.
+ if (frame::interpreter_frame_expression_stack_direction() > 0) { + return addr <= fr.interpreter_frame_tos_address(); } - StackValueCollection* result = new StackValueCollection(length); + return addr >= fr.interpreter_frame_tos_address(); +} - // Get oopmap describing oops and int for current bci +static void stack_locals(StackValueCollection* result, + int length, + const InterpreterOopMap& oop_mask, + const frame& fr) { + + assert(result != NULL, "invariant"); + + for (int i = 0; i < length; ++i) { + const intptr_t* const addr = fr.interpreter_frame_local_at(i); + assert(addr != NULL, "invariant"); + assert(addr >= fr.sp(), "must be inside the frame"); + + StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr); + assert(sv != NULL, "sanity check"); + + result->add(sv); + } +} + +static void stack_expressions(StackValueCollection* result, + int length, + int max_locals, + const InterpreterOopMap& oop_mask, + const frame& fr) { + + assert(result != NULL, "invariant"); + + for (int i = 0; i < length; ++i) { + const intptr_t* addr = fr.interpreter_frame_expression_stack_at(i); + assert(addr != NULL, "invariant"); + if (!is_in_expression_stack(fr, addr)) { + // Need to ensure no bogus escapes. + addr = NULL; + } + + StackValue* const sv = create_stack_value_from_oop_map(oop_mask, + i + max_locals, + addr); + assert(sv != NULL, "sanity check"); + + result->add(sv); + } +} + +StackValueCollection* interpretedVFrame::locals() const { + return stack_data(false); +} + +StackValueCollection* interpretedVFrame::expressions() const { + return stack_data(true); +} + +/* + * Worker routine for fetching references and/or values + * for a particular bci in the interpretedVFrame. + * + * Returns data for either "locals" or "expressions", + * using bci relative oop_map (oop_mask) information. + * + * @param expressions bool switch controlling what data to return + (false == locals / true == expressions) + * + */ +StackValueCollection* interpretedVFrame::stack_data(bool expressions) const { + InterpreterOopMap oop_mask; + // oopmap for current bci if (TraceDeoptimization && Verbose) { - methodHandle m_h(thread(), method()); + methodHandle m_h(Thread::current(), method()); OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask); } else { method()->mask_for(bci(), &oop_mask); } - // handle locals - for(int i=0; i < length; i++) { - // Find stack location - intptr_t *addr = locals_addr_at(i); - // Depending on oop/int put it in the right package - StackValue *sv; - if (oop_mask.is_oop(i)) { - // oop value - Handle h(*(oop *)addr); - sv = new StackValue(h); - } else { - // integer - sv = new StackValue(*addr); - } - assert(sv != NULL, "sanity check"); - result->add(sv); + const int mask_len = oop_mask.number_of_entries(); + + // If the method is native, method()->max_locals() is not telling the truth. + // For our purposes, max locals instead equals the size of parameters. + const int max_locals = method()->is_native() ? + method()->size_of_parameters() : method()->max_locals(); + + assert(mask_len >= max_locals, "invariant"); + + const int length = expressions ? 
mask_len - max_locals : max_locals; + assert(length >= 0, "invariant"); + + StackValueCollection* const result = new StackValueCollection(length); + + if (0 == length) { + return result; } + + if (expressions) { + stack_expressions(result, length, max_locals, oop_mask, fr()); + } else { + stack_locals(result, length, oop_mask, fr()); + } + + assert(length == result->size(), "invariant"); + return result; } void interpretedVFrame::set_locals(StackValueCollection* values) const { if (values == NULL || values->size() == 0) return; - int length = method()->max_locals(); - if (method()->is_native()) { - // If the method is native, max_locals is not telling the truth. - // maxlocals then equals the size of parameters - length = method()->size_of_parameters(); - } + // If the method is native, max_locals is not telling the truth. + // maxlocals then equals the size of parameters + const int max_locals = method()->is_native() ? + method()->size_of_parameters() : method()->max_locals(); - assert(length == values->size(), "Mismatch between actual stack format and supplied data"); + assert(max_locals == values->size(), "Mismatch between actual stack format and supplied data"); // handle locals - for (int i = 0; i < length; i++) { + for (int i = 0; i < max_locals; i++) { // Find stack location intptr_t *addr = locals_addr_at(i); // Depending on oop/int put it in the right package - StackValue *sv = values->at(i); + const StackValue* const sv = values->at(i); assert(sv != NULL, "sanity check"); if (sv->type() == T_OBJECT) { *(oop *) addr = (sv->get_obj())(); @@ -328,46 +419,6 @@ } } -StackValueCollection* interpretedVFrame::expressions() const { - int length = fr().interpreter_frame_expression_stack_size(); - if (method()->is_native()) { - // If the method is native, there is no expression stack - length = 0; - } - - int nof_locals = method()->max_locals(); - StackValueCollection* result = new StackValueCollection(length); - - InterpreterOopMap oop_mask; - // Get oopmap describing oops and int for current bci - if (TraceDeoptimization && Verbose) { - methodHandle m_h(method()); - OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask); - } else { - method()->mask_for(bci(), &oop_mask); - } - // handle expressions - for(int i=0; i < length; i++) { - // Find stack location - intptr_t *addr = fr().interpreter_frame_expression_stack_at(i); - - // Depending on oop/int put it in the right package - StackValue *sv; - if (oop_mask.is_oop(i + nof_locals)) { - // oop value - Handle h(*(oop *)addr); - sv = new StackValue(h); - } else { - // integer - sv = new StackValue(*addr); - } - assert(sv != NULL, "sanity check"); - result->add(sv); - } - return result; -} - - // ------------- cChunk -------------- entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) --- ./hotspot/src/share/vm/runtime/vframe.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vframe.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -186,7 +186,7 @@ private: static const int bcp_offset; intptr_t* locals_addr_at(int offset) const; - + StackValueCollection* stack_data(bool expressions) const; // returns where the parameters starts relative to the frame pointer int start_of_parameters() const; --- ./hotspot/src/share/vm/runtime/vframeArray.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vframeArray.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -56,7 +56,7 @@ } } -void vframeArrayElement::fill_in(compiledVFrame* vf) { +void vframeArrayElement::fill_in(compiledVFrame* vf, bool 
realloc_failures) { // Copy the information from the compiled vframe to the // interpreter frame we will be creating to replace vf @@ -64,6 +64,9 @@ _method = vf->method(); _bci = vf->raw_bci(); _reexecute = vf->should_reexecute(); +#ifdef ASSERT + _removed_monitors = false; +#endif int index; @@ -81,11 +84,15 @@ // Migrate the BasicLocks from the stack to the monitor chunk for (index = 0; index < list->length(); index++) { MonitorInfo* monitor = list->at(index); - assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already"); - assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased"); + assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); BasicObjectLock* dest = _monitors->at(index); - dest->set_obj(monitor->owner()); - monitor->lock()->move_to(monitor->owner(), dest->lock()); + if (monitor->owner_is_scalar_replaced()) { + dest->set_obj(NULL); + } else { + assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased"); + dest->set_obj(monitor->owner()); + monitor->lock()->move_to(monitor->owner(), dest->lock()); + } } } @@ -110,7 +117,7 @@ StackValue* value = locs->at(index); switch(value->type()) { case T_OBJECT: - assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); + assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); // preserve object type _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT )); break; @@ -135,7 +142,7 @@ StackValue* value = exprs->at(index); switch(value->type()) { case T_OBJECT: - assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); + assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); // preserve object type _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT )); break; @@ -286,7 +293,7 @@ _frame.patch_pc(thread, pc); - assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors"); + assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors"); BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin(); for (int index = 0; index < locks; index++) { @@ -438,7 +445,8 @@ vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, - RegisterMap *reg_map, frame sender, frame caller, frame self) { + RegisterMap *reg_map, frame sender, frame caller, frame self, + bool realloc_failures) { // Allocate the vframeArray vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part @@ -450,19 +458,20 @@ result->_caller = caller; result->_original = self; result->set_unroll_block(NULL); // initialize it - result->fill_in(thread, frame_size, chunk, reg_map); + result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures); return result; } void vframeArray::fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, - const RegisterMap *reg_map) { + const RegisterMap *reg_map, + bool realloc_failures) { // Set owner first, it is used when adding monitor chunks _frame_size = frame_size; for(int i = 0; i < chunk->length(); i++) { - element(i)->fill_in(chunk->at(i)); + element(i)->fill_in(chunk->at(i), realloc_failures); } // Copy registers for
callee-saved registers --- ./hotspot/src/share/vm/runtime/vframeArray.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vframeArray.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -58,6 +58,9 @@ MonitorChunk* _monitors; // active monitors for this vframe StackValueCollection* _locals; StackValueCollection* _expressions; +#ifdef ASSERT + bool _removed_monitors; +#endif public: @@ -78,7 +81,7 @@ StackValueCollection* expressions(void) const { return _expressions; } - void fill_in(compiledVFrame* vf); + void fill_in(compiledVFrame* vf, bool realloc_failures); // Formerly part of deoptimizedVFrame @@ -99,6 +102,12 @@ bool is_bottom_frame, int exec_mode); +#ifdef ASSERT + void set_removed_monitors() { + _removed_monitors = true; + } +#endif + #ifndef PRODUCT void print(outputStream* st); #endif /* PRODUCT */ @@ -160,13 +169,14 @@ int frames() const { return _frames; } static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, - RegisterMap* reg_map, frame sender, frame caller, frame self); + RegisterMap* reg_map, frame sender, frame caller, frame self, + bool realloc_failures); vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; } // Allocates a new vframe in the array and fills the array with vframe information in chunk - void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map); + void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map, bool realloc_failures); // Returns the owner of this vframeArray JavaThread* owner_thread() const { return _owner_thread; } --- ./hotspot/src/share/vm/runtime/vmStructs.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vmStructs.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -320,7 +320,7 @@ nonstatic_field(InstanceKlass, _jni_ids, JNIid*) \ nonstatic_field(InstanceKlass, _osr_nmethods_head, nmethod*) \ nonstatic_field(InstanceKlass, _breakpoints, BreakpointInfo*) \ - nonstatic_field(InstanceKlass, _generic_signature_index, u2) \ + nonstatic_field(InstanceKlass, _generic_signature_index, u2) \ nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \ volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \ nonstatic_field(InstanceKlass, _annotations, Annotations*) \ @@ -665,6 +665,7 @@ static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), Klass*) \ static_field(SystemDictionary, WK_KLASS(ProtectionDomain_klass), Klass*) \ static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(SecureClassLoader_klass), Klass*) \ static_field(SystemDictionary, WK_KLASS(Reference_klass), Klass*) \ static_field(SystemDictionary, WK_KLASS(SoftReference_klass), Klass*) \ static_field(SystemDictionary, WK_KLASS(WeakReference_klass), Klass*) \ @@ -814,6 +815,7 @@ static_field(StubRoutines, _cipherBlockChaining_decryptAESCrypt, address) \ static_field(StubRoutines, _updateBytesCRC32, address) \ static_field(StubRoutines, _crc_table_adr, address) \ + static_field(StubRoutines, _multiplyToLen, address) \ \ /*****************/ \ /* SharedRuntime */ \ @@ -2495,6 +2497,7 @@ declare_constant(Deoptimization::Reason_age) \ declare_constant(Deoptimization::Reason_predicate) \ declare_constant(Deoptimization::Reason_loop_limit_check) \ + declare_constant(Deoptimization::Reason_unstable_if) \ declare_constant(Deoptimization::Reason_LIMIT) \ declare_constant(Deoptimization::Reason_RECORDED_LIMIT) \ \ ---
./hotspot/src/share/vm/runtime/vmThread.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vmThread.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -682,7 +682,7 @@ } -void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { +void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { Thread::oops_do(f, cld_f, cf); _vm_queue->oops_do(f); } --- ./hotspot/src/share/vm/runtime/vmThread.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vmThread.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -126,7 +126,7 @@ static VMThread* vm_thread() { return _vm_thread; } // GC support - void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf); + void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // Debugging void print_on(outputStream* st) const; --- ./hotspot/src/share/vm/runtime/vm_operations.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/runtime/vm_operations.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -68,6 +68,7 @@ template(G1CollectFull) \ template(G1CollectForAllocation) \ template(G1IncCollectionPause) \ + template(DestroyAllocationContext) \ template(EnableBiasedLocking) \ template(RevokeBias) \ template(BulkRevokeBias) \ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/allocationContextService.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP +#define SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP + +#include "utilities/exceptions.hpp" + +class AllocationContextService: public AllStatic { +public: + static inline bool should_notify(); + static inline void notify(TRAPS); +}; + +bool AllocationContextService::should_notify() { return false; } +void AllocationContextService::notify(TRAPS) { } + +#endif // SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/allocationSite.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP +#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP + +#include "memory/allocation.hpp" +#include "utilities/nativeCallStack.hpp" + +// Allocation site represents a code path that makes a memory +// allocation +template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC { + private: + NativeCallStack _call_stack; + E e; + public: + AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { } + int hash() const { return _call_stack.hash(); } + bool equals(const NativeCallStack& stack) const { + return _call_stack.equals(stack); + } + + bool equals(const AllocationSite& other) const { + return other.equals(_call_stack); + } + + const NativeCallStack* call_stack() const { + return &_call_stack; + } + + // Information regarding this allocation + E* data() { return &e; } + const E* peek() const { return &e; } +}; + +#endif // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP --- ./hotspot/src/share/vm/services/attachListener.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/attachListener.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -162,10 +162,7 @@ java_lang_Throwable::print(PENDING_EXCEPTION, out); out->cr(); CLEAR_PENDING_EXCEPTION; - // The exception has been printed on the output stream - // If the JVM returns JNI_ERR, the attachAPI throws a generic I/O - // exception and the content of the output stream is not processed. - // By returning JNI_OK, the exception will be displayed on the client side + return JNI_ERR; } return JNI_OK; } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/mallocSiteTable.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ +#include "precompiled.hpp" + + +#include "memory/allocation.inline.hpp" +#include "runtime/atomic.hpp" +#include "services/mallocSiteTable.hpp" + +/* + * Early os::malloc() calls come from initializations of static variables, long before entering any + * VM code. Upon the arrival of the first os::malloc() call, the malloc site hashtable has to be + * initialized, along with the allocation site for the hashtable entries. + * To ensure that the malloc site hashtable can be initialized without triggering any additional os::malloc() + * call, the hashtable bucket array and hashtable entry allocation site have to be static. + * It is not a problem for the hashtable bucket, since it is an array of pointer type; the C runtime just + * allocates a block of memory and zeroes it. + * But for the hashtable entry allocation site object, things get tricky. The C runtime not only allocates + * memory for it, but also calls its constructor at some later time. If we initialize the allocation site + * at the first os::malloc() call, the object will be reinitialized when its constructor is called + * by the C runtime. + * To work around the above issue, we declare a static size_t array with the size of the CallsiteHashtableEntry, + * whose memory is used to instantiate the CallsiteHashtableEntry for the hashtable entry allocation site. + * Given it is a primitive type array, the C runtime will do nothing other than assign the memory block for the variable, + * which is exactly what we want. + * The same trick is also applied to create the NativeCallStack object for CallsiteHashtableEntry memory allocation. + * + * Note: C++ objects usually require a particular alignment, depending on the compiler implementation; we declare + * the memory as size_t arrays to ensure it is aligned to the native machine word. + */ + +// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects +size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)]; +size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)]; + +// Malloc site hashtable buckets +MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size]; + +// concurrent access counter +volatile int MallocSiteTable::_access_count = 0; + +// Tracking hashtable contention +NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;) + + +/* + * Initialize malloc site table. + * Hashtable entries are malloc'd, so the initialization can cause infinite recursion. + * To avoid the above problem, we pre-initialize a hash entry for + * this allocation site. + * The method is called during C runtime static variable initialization + * time, when the process is still single-threaded from the JVM's perspective.
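The comment above describes building objects inside raw static size_t buffers so that no construction runs through os::malloc() or through the C runtime's static-initialization machinery at the wrong time. A minimal self-contained version of the same trick (EntryModel and the element-count formula are illustrative; HotSpot computes the count with CALC_OBJ_SIZE_IN_TYPE):

#include <new>
#include <cstddef>

// Raw storage declared as a size_t array: word-aligned, zero-initialized by
// the C runtime, and no constructor runs until we say so.
struct EntryModel { int depth; EntryModel(int d) : depth(d) {} };

static size_t entry_storage[(sizeof(EntryModel) + sizeof(size_t) - 1) / sizeof(size_t)];

EntryModel* make_static_entry(int depth) {
  // Placement new: constructs in-place, no heap allocation involved.
  return ::new ((void*)entry_storage) EntryModel(depth);
}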
+ */ +bool MallocSiteTable::initialize() { + assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check"); + assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry), + "Sanity Check"); + assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow"); + + // Fake the call stack for hashtable entry allocation + assert(NMT_TrackingStackDepth > 1, "At least one tracking stack"); + + // Create pseudo call stack for hashtable entry allocation + address pc[3]; + if (NMT_TrackingStackDepth >= 3) { + pc[2] = (address)MallocSiteTable::allocation_at; + } + if (NMT_TrackingStackDepth >= 2) { + pc[1] = (address)MallocSiteTable::lookup_or_add; + } + pc[0] = (address)MallocSiteTable::new_entry; + + // Instantiate the NativeCallStack object; we have to use the placement new operator (see comments above). + NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack) + NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth))); + + // Instantiate the hash entry for the hashtable entry allocation callsite + MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site) + MallocSiteHashtableEntry(*stack); + + // Add the allocation site to the hashtable. + int index = hash_to_index(stack->hash()); + _table[index] = entry; + + return true; +} + +// Walks entries in the hashtable. +// It stops walking if the walker returns false. +bool MallocSiteTable::walk(MallocSiteWalker* walker) { + MallocSiteHashtableEntry* head; + for (int index = 0; index < table_size; index ++) { + head = _table[index]; + while (head != NULL) { + if (!walker->do_malloc_site(head->peek())) { + return false; + } + head = (MallocSiteHashtableEntry*)head->next(); + } + } + return true; +} + +/* + * The hashtable does not have a deletion policy for individual entries, + * and each linked list node is inserted via compare-and-swap, + * so each linked list is stable; contention only happens + * at the end of a linked list. + * This method should not return NULL under normal circumstances. + * If NULL is returned, it indicates: + * 1. Out of memory: it cannot allocate a new hash entry. + * 2. Overflow of a hash bucket. + * Under any of the above circumstances, the caller should handle the situation.
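The lookup_or_add() implementation that follows realizes this with two compare-and-swap points: one CAS to install a bucket head, one CAS to append at a tail's next pointer. A self-contained model using std::atomic in place of Atomic::cmpxchg_ptr (HotSpot also caps the bucket length, which this sketch omits):

#include <atomic>
#include <cstddef>

struct NodeModel {
  long key;
  std::atomic<NodeModel*> next;
  NodeModel(long k) : key(k), next(NULL) {}
};
static std::atomic<NodeModel*> bucket_head(NULL);

NodeModel* lookup_or_add_model(long key) {
  NodeModel* head = bucket_head.load();
  if (head == NULL) {
    NodeModel* e = new NodeModel(key);
    NodeModel* expected = NULL;
    if (bucket_head.compare_exchange_strong(expected, e)) return e;
    delete e;                        // lost the race; fall through and search
    head = bucket_head.load();
  }
  for (NodeModel* p = head; p != NULL; p = p->next.load()) {
    if (p->key == key) return p;     // existing site: reuse it
    if (p->next.load() == NULL) {    // at the stable tail: try to append
      NodeModel* e = new NodeModel(key);
      NodeModel* expected = NULL;
      if (p->next.compare_exchange_strong(expected, e)) return e;
      delete e;                      // another thread appended first; keep walking
    }
  }
  return NULL;                       // unreachable without the bucket-length cap
}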
+ */ +MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, + size_t* pos_idx) { + int index = hash_to_index(key.hash()); + assert(index >= 0, "Negative index"); + *bucket_idx = (size_t)index; + *pos_idx = 0; + + // First entry for this hash bucket + if (_table[index] == NULL) { + MallocSiteHashtableEntry* entry = new_entry(key); + // OOM check + if (entry == NULL) return NULL; + + // swap in the head + if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) { + return entry->data(); + } + + delete entry; + } + + MallocSiteHashtableEntry* head = _table[index]; + while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) { + MallocSite* site = head->data(); + if (site->equals(key)) { + // found a matching entry + return head->data(); + } + + if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) { + MallocSiteHashtableEntry* entry = new_entry(key); + // OOM check + if (entry == NULL) return NULL; + if (head->atomic_insert(entry)) { + (*pos_idx) ++; + return entry->data(); + } + // contended, other thread won + delete entry; + } + head = (MallocSiteHashtableEntry*)head->next(); + (*pos_idx) ++; + } + return NULL; +} + +// Access malloc site +MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) { + assert(bucket_idx < table_size, "Invalid bucket index"); + MallocSiteHashtableEntry* head = _table[bucket_idx]; + for (size_t index = 0; index < pos_idx && head != NULL; + index ++, head = (MallocSiteHashtableEntry*)head->next()); + assert(head != NULL, "Invalid position index"); + return head->data(); +} + +// Allocates MallocSiteHashtableEntry object. Special call stack +// (pre-installed allocation site) has to be used to avoid infinite +// recursion. +MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) { + void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT, + *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL); + return ::new (p) MallocSiteHashtableEntry(key); +} + +void MallocSiteTable::reset() { + for (int index = 0; index < table_size; index ++) { + MallocSiteHashtableEntry* head = _table[index]; + _table[index] = NULL; + delete_linked_list(head); + } +} + +void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) { + MallocSiteHashtableEntry* p; + while (head != NULL) { + p = head; + head = (MallocSiteHashtableEntry*)head->next(); + if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) { + delete p; + } + } +} + +void MallocSiteTable::shutdown() { + AccessLock locker(&_access_count); + locker.exclusiveLock(); + reset(); +} + +bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) { + assert(walker != NULL, "NULL walker"); + AccessLock locker(&_access_count); + if (locker.sharedLock()) { + NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);) + return walk(walker); + } + return false; +} + + +void MallocSiteTable::AccessLock::exclusiveLock() { + jint target; + jint val; + + assert(_lock_state != ExclusiveLock, "Can only call once"); + assert(*_lock >= 0, "Cannot contend for exclusive lock"); + + // make counter negative to block out shared locks + do { + val = *_lock; + target = _MAGIC_ + *_lock; + } while (Atomic::cmpxchg(target, _lock, val) != val); + + // wait for all readers to exit + while (*_lock != _MAGIC_) { +#ifdef _WINDOWS + os::naked_short_sleep(1); +#else + os::NakedYield(); +#endif + } + _lock_state = ExclusiveLock; +} + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++
./hotspot/src/share/vm/services/mallocSiteTable.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP +#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP + +#if INCLUDE_NMT + +#include "memory/allocation.hpp" +#include "runtime/atomic.hpp" +#include "services/allocationSite.hpp" +#include "services/mallocTracker.hpp" +#include "services/nmtCommon.hpp" +#include "utilities/nativeCallStack.hpp" + +// MallocSite represents a code path that eventually calls +// os::malloc() to allocate memory +class MallocSite : public AllocationSite<MemoryCounter> { + public: + MallocSite() : + AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { } + + MallocSite(const NativeCallStack& stack) : + AllocationSite<MemoryCounter>(stack) { } + + void allocate(size_t size) { data()->allocate(size); } + void deallocate(size_t size) { data()->deallocate(size); } + + // Memory allocated from this code path + size_t size() const { return peek()->size(); } + // The number of calls that were made + size_t count() const { return peek()->count(); } +}; + +// Malloc site hashtable entry +class MallocSiteHashtableEntry : public CHeapObj<mtNMT> { + private: + MallocSite _malloc_site; + MallocSiteHashtableEntry* _next; + + public: + MallocSiteHashtableEntry() : _next(NULL) { } + + MallocSiteHashtableEntry(NativeCallStack stack): + _malloc_site(stack), _next(NULL) { } + + inline const MallocSiteHashtableEntry* next() const { + return _next; + } + + // Insert an entry atomically. + // Return true if the entry is inserted successfully. + // The operation can fail due to contention from another thread.
+ bool atomic_insert(const MallocSiteHashtableEntry* entry) { + return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next, + NULL) == NULL); + } + + void set_callsite(const MallocSite& site) { + _malloc_site = site; + } + + inline const MallocSite* peek() const { return &_malloc_site; } + inline MallocSite* data() { return &_malloc_site; } + + inline long hash() const { return _malloc_site.hash(); } + inline bool equals(const NativeCallStack& stack) const { + return _malloc_site.equals(stack); + } + // Allocation/deallocation on this allocation site + inline void allocate(size_t size) { _malloc_site.allocate(size); } + inline void deallocate(size_t size) { _malloc_site.deallocate(size); } + // Memory counters + inline size_t size() const { return _malloc_site.size(); } + inline size_t count() const { return _malloc_site.count(); } +}; + +// The walker walks every entry on MallocSiteTable +class MallocSiteWalker : public StackObj { + public: + virtual bool do_malloc_site(const MallocSite* e) { return false; } +}; + +/* + * Native memory tracking call site table. + * The table is only needed when detail tracking is enabled. + */ +class MallocSiteTable : AllStatic { + private: + // The number of hash buckets in this hashtable. The number should + // be tuned if malloc activity changes significantly. + // The statistics data can be obtained via Jcmd + // jcmd <pid> VM.native_memory statistics. + + // Currently, (number of buckets / number of entries) ratio is + // about 1 / 6 + enum { + table_base_size = 128, // The base size is calculated from statistics to give + // table ratio around 1:6 + table_size = (table_base_size * NMT_TrackingStackDepth - 1) + }; + + + // This is a very special lock that allows multiple shared accesses (sharedLock), but + // once exclusive access (exclusiveLock) is requested, all shared accesses are + // rejected forever. + class AccessLock : public StackObj { + enum LockState { + NoLock, + SharedLock, + ExclusiveLock + }; + + private: + // A very large negative number. The only possibility to "overflow" + // this number is when there are more than -min_jint threads in + // this process, which is not going to happen in the foreseeable future. + const static int _MAGIC_ = min_jint; + + LockState _lock_state; + volatile int* _lock; + public: + AccessLock(volatile int* lock) : + _lock(lock), _lock_state(NoLock) { + } + + ~AccessLock() { + if (_lock_state == SharedLock) { + Atomic::dec((volatile jint*)_lock); + } + } + // Acquire shared lock. + // Return true if shared access is granted. + inline bool sharedLock() { + jint res = Atomic::add(1, _lock); + if (res < 0) { + Atomic::add(-1, _lock); + return false; + } + _lock_state = SharedLock; + return true; + } + // Acquire exclusive lock + void exclusiveLock(); + }; + + public: + static bool initialize(); + static void shutdown(); + + NOT_PRODUCT(static int access_peak_count() { return _peak_count; }) + + // Number of hash buckets + static inline int hash_buckets() { return (int)table_size; } + + // Access and copy a call stack from this table. Shared lock should be + // acquired before accessing the entry.
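The AccessLock declared above is a one-shot reader/writer gate: readers bump a counter and retreat if it is negative; the single writer (shutdown) swings the counter by min_jint so every later reader fails, then spins until in-flight readers drain. A compact std::atomic model of the same protocol (names are illustrative):

#include <atomic>
#include <climits>

static std::atomic<int> access_count_model(0);

bool shared_lock_model() {
  if (access_count_model.fetch_add(1) + 1 < 0) {  // writer already arrived
    access_count_model.fetch_sub(1);
    return false;
  }
  return true;
}
void shared_unlock_model() { access_count_model.fetch_sub(1); }

void exclusive_lock_model() {                     // called once, at shutdown
  int val = access_count_model.load();
  // Make the counter deeply negative so no new reader can enter (INT_MIN
  // plays the role of the patch's _MAGIC_).
  while (!access_count_model.compare_exchange_weak(val, INT_MIN + val)) {
    // val was refreshed by the failed CAS; retry with the fresh value
  }
  while (access_count_model.load() != INT_MIN) {
    // spin until the in-flight readers have exited
  }
}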
+  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
+    size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        stack = *site->call_stack();
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Record a new allocation from the specified call path.
+  // Return true if the allocation is recorded successfully; bucket_idx
+  // and pos_idx are also updated to indicate the entry where the allocation
+  // information was recorded.
+  // Returning false only occurs under rare scenarios:
+  //   1. out of memory
+  //   2. overflowed hash bucket
+  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
+    size_t* bucket_idx, size_t* pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
+      if (site != NULL) site->allocate(size);
+      return site != NULL;
+    }
+    return false;
+  }
+
+  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
+  // allocation information was recorded.
+  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        site->deallocate(size);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Walk this table.
+  static bool walk_malloc_site(MallocSiteWalker* walker);
+
+ private:
+  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
+  static void reset();
+
+  // Delete a bucket linked list
+  static void delete_linked_list(MallocSiteHashtableEntry* head);
+
+  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
+  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
+  static bool walk(MallocSiteWalker* walker);
+
+  static inline int hash_to_index(int hash) {
+    hash = (hash > 0) ? hash : (-hash);
+    return (hash % table_size);
+  }
+
+  static inline const NativeCallStack* hash_entry_allocation_stack() {
+    return (NativeCallStack*)_hash_entry_allocation_stack;
+  }
+
+ private:
+  // Counter for counting concurrent access
+  static volatile int _access_count;
+
+  // The callsite hashtable. It has to be a static table,
+  // since malloc calls can come from the C runtime linker.
+  static MallocSiteHashtableEntry* _table[table_size];
+
+
+  // Reserve enough memory for placing the objects
+
+  // The memory for the hashtable entry allocation stack object
+  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
+  // The memory for the hashtable entry allocation callsite object
+  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
+  NOT_PRODUCT(static int _peak_count;)
+};
+
+#endif // INCLUDE_NMT
+#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/src/share/vm/services/mallocTracker.cpp	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/mallocTracker.inline.hpp"
+#include "services/memTracker.hpp"
+
+size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+// Total malloc'd memory amount
+size_t MallocMemorySnapshot::total() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].malloc_size();
+  }
+  amount += _tracking_header.size() + total_arena();
+  return amount;
+}
+
+// Total malloc'd memory used by arenas
+size_t MallocMemorySnapshot::total_arena() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].arena_size();
+  }
+  return amount;
+}
+
+// Make an adjustment by subtracting the chunks used by arenas
+// from the total chunks, to get the total free chunk size
+void MallocMemorySnapshot::make_adjustment() {
+  size_t arena_size = total_arena();
+  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
+  _malloc[chunk_idx].record_free(arena_size);
+}
+
+
+void MallocMemorySummary::initialize() {
+  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
+  // Uses the placement new operator to initialize the static area.
+  ::new ((void*)_snapshot)MallocMemorySnapshot();
+}
+
+void MallocHeader::release() const {
+  // Tracking has already shut down; no housekeeping is needed anymore
+  if (MemTracker::tracking_level() <= NMT_minimal) return;
+
+  MallocMemorySummary::record_free(size(), flags());
+  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
+  }
+}
+
+bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
+  size_t* bucket_idx, size_t* pos_idx) const {
+  bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
+
+  // Something went wrong: it could be OOM or an overflowed malloc site table.
+  // We want to keep tracking data under OOM circumstances, so transition to
+  // summary tracking.
+ if (!ret) { + MemTracker::transition_to(NMT_summary); + } + return ret; +} + +bool MallocHeader::get_stack(NativeCallStack& stack) const { + return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx); +} + +bool MallocTracker::initialize(NMT_TrackingLevel level) { + if (level >= NMT_summary) { + MallocMemorySummary::initialize(); + } + + if (level == NMT_detail) { + return MallocSiteTable::initialize(); + } + return true; +} + +bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { + assert(from != NMT_off, "Can not transition from off state"); + assert(to != NMT_off, "Can not transition to off state"); + assert (from != NMT_minimal, "cannot transition from minimal state"); + + if (from == NMT_detail) { + assert(to == NMT_minimal || to == NMT_summary, "Just check"); + MallocSiteTable::shutdown(); + } + return true; +} + +// Record a malloc memory allocation +void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, + const NativeCallStack& stack, NMT_TrackingLevel level) { + void* memblock; // the address for user data + MallocHeader* header = NULL; + + if (malloc_base == NULL) { + return NULL; + } + + // Uses placement global new operator to initialize malloc header + + if (level == NMT_off) { + return malloc_base; + } + + header = ::new (malloc_base)MallocHeader(size, flags, stack, level); + memblock = (void*)((char*)malloc_base + sizeof(MallocHeader)); + + // The alignment check: 8 bytes alignment for 32 bit systems. + // 16 bytes alignment for 64-bit systems. + assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check"); + +#ifdef ASSERT + if (level > NMT_minimal) { + // Read back + assert(get_size(memblock) == size, "Wrong size"); + assert(get_flags(memblock) == flags, "Wrong flags"); + } +#endif + + return memblock; +} + +void* MallocTracker::record_free(void* memblock) { + // Never turned on + if (MemTracker::tracking_level() == NMT_off || + memblock == NULL) { + return memblock; + } + MallocHeader* header = malloc_header(memblock); + header->release(); + + return (void*)header; +} + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/mallocTracker.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP +#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP + +#if INCLUDE_NMT + +#include "memory/allocation.hpp" +#include "runtime/atomic.hpp" +#include "services/nmtCommon.hpp" +#include "utilities/nativeCallStack.hpp" + +/* + * This counter class counts memory allocation and deallocation, + * records total memory allocation size and number of allocations. + * The counters are updated atomically. + */ +class MemoryCounter VALUE_OBJ_CLASS_SPEC { + private: + size_t _count; + size_t _size; + + DEBUG_ONLY(size_t _peak_count;) + DEBUG_ONLY(size_t _peak_size; ) + + public: + MemoryCounter() : _count(0), _size(0) { + DEBUG_ONLY(_peak_count = 0;) + DEBUG_ONLY(_peak_size = 0;) + } + + inline void allocate(size_t sz) { + Atomic::add(1, (volatile MemoryCounterType*)&_count); + if (sz > 0) { + Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size); + DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size)); + } + DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);) + } + + inline void deallocate(size_t sz) { + assert(_count > 0, "Negative counter"); + assert(_size >= sz, "Negative size"); + Atomic::add(-1, (volatile MemoryCounterType*)&_count); + if (sz > 0) { + Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size); + } + } + + inline void resize(long sz) { + if (sz != 0) { + Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size); + DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);) + } + } + + inline size_t count() const { return _count; } + inline size_t size() const { return _size; } + DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; }) + DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; }) + +}; + +/* + * Malloc memory used by a particular subsystem. + * It includes the memory acquired through os::malloc() + * call and arena's backing memory. + */ +class MallocMemory VALUE_OBJ_CLASS_SPEC { + private: + MemoryCounter _malloc; + MemoryCounter _arena; + + public: + MallocMemory() { } + + inline void record_malloc(size_t sz) { + _malloc.allocate(sz); + } + + inline void record_free(size_t sz) { + _malloc.deallocate(sz); + } + + inline void record_new_arena() { + _arena.allocate(0); + } + + inline void record_arena_free() { + _arena.deallocate(0); + } + + inline void record_arena_size_change(long sz) { + _arena.resize(sz); + } + + inline size_t malloc_size() const { return _malloc.size(); } + inline size_t malloc_count() const { return _malloc.count();} + inline size_t arena_size() const { return _arena.size(); } + inline size_t arena_count() const { return _arena.count(); } + + DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; }) + DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; }) +}; + +class MallocMemorySummary; + +// A snapshot of malloc'd memory, includes malloc memory +// usage by types and memory used by tracking itself. 
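// [Editor's note] A sketch of the "reserve static storage, construct in place"
// technique used for MallocMemorySummary's _snapshot (defined in
// mallocTracker.cpp above, declared below): NMT cannot call os::malloc() for
// its own bookkeeping, since that would recurse into the tracker, so storage
// is reserved as a size_t array and the object is built there with placement
// new. CALC_OBJ_SIZE_IN_TYPE is assumed to round sizeof(T) up to whole size_t
// units, roughly (sizeof(T) + sizeof(size_t) - 1) / sizeof(size_t).
// Snapshot, g_storage and init_snapshot are illustrative names.

#include <cstddef>
#include <new>

struct Snapshot {
  size_t data[8];
  Snapshot() { for (int i = 0; i < 8; i++) data[i] = 0; }
};

// Static storage, sized in whole size_t units, with size_t alignment.
static size_t g_storage[(sizeof(Snapshot) + sizeof(size_t) - 1) / sizeof(size_t)];

static Snapshot* init_snapshot() {
  // Placement new constructs the object in pre-reserved storage; no heap call.
  return ::new (static_cast<void*>(g_storage)) Snapshot();
}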
+class MallocMemorySnapshot : public ResourceObj {
+  friend class MallocMemorySummary;
+
+ private:
+  MallocMemory  _malloc[mt_number_of_types];
+  MemoryCounter _tracking_header;
+
+
+ public:
+  inline MallocMemory* by_type(MEMFLAGS flags) {
+    int index = NMTUtil::flag_to_index(flags);
+    return &_malloc[index];
+  }
+
+  inline MallocMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bound");
+    assert(index < mt_number_of_types, "Index out of bound");
+    return &_malloc[index];
+  }
+
+  inline MemoryCounter* malloc_overhead() {
+    return &_tracking_header;
+  }
+
+  // Total malloc'd memory amount
+  size_t total() const;
+  // Total malloc'd memory used by arenas
+  size_t total_arena() const;
+
+  inline size_t thread_count() const {
+    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
+    return s->by_type(mtThreadStack)->malloc_count();
+  }
+
+  void copy_to(MallocMemorySnapshot* s) {
+    s->_tracking_header = _tracking_header;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_malloc[index] = _malloc[index];
+    }
+  }
+
+  // Make adjustment by subtracting chunks used by arenas
+  // from total chunks to get total free chunk size
+  void make_adjustment();
+};
+
+/*
+ * This class is for collecting malloc statistics at summary level
+ */
+class MallocMemorySummary : AllStatic {
+ private:
+  // Reserve memory for placement of MallocMemorySnapshot object
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+ public:
+  static void initialize();
+
+  static inline void record_malloc(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->record_malloc(size);
+  }
+
+  static inline void record_free(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->record_free(size);
+  }
+
+  static inline void record_new_arena(MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->record_new_arena();
+  }
+
+  static inline void record_arena_free(MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->record_arena_free();
+  }
+
+  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->record_arena_size_change(size);
+  }
+
+  static void snapshot(MallocMemorySnapshot* s) {
+    as_snapshot()->copy_to(s);
+    s->make_adjustment();
+  }
+
+  // Record memory used by malloc tracking header
+  static inline void record_new_malloc_header(size_t sz) {
+    as_snapshot()->malloc_overhead()->allocate(sz);
+  }
+
+  static inline void record_free_malloc_header(size_t sz) {
+    as_snapshot()->malloc_overhead()->deallocate(sz);
+  }
+
+  // The memory used by malloc tracking headers
+  static inline size_t tracking_overhead() {
+    return as_snapshot()->malloc_overhead()->size();
+  }
+
+  static MallocMemorySnapshot* as_snapshot() {
+    return (MallocMemorySnapshot*)_snapshot;
+  }
+};
+
+
+/*
+ * Malloc tracking header.
+ * To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
+ * which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
+ */ + +class MallocHeader VALUE_OBJ_CLASS_SPEC { +#ifdef _LP64 + size_t _size : 64; + size_t _flags : 8; + size_t _pos_idx : 16; + size_t _bucket_idx: 40; +#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40) +#define MAX_BUCKET_LENGTH right_n_bits(16) +#else + size_t _size : 32; + size_t _flags : 8; + size_t _pos_idx : 8; + size_t _bucket_idx: 16; +#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16) +#define MAX_BUCKET_LENGTH right_n_bits(8) +#endif // _LP64 + + public: + MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) { + assert(sizeof(MallocHeader) == sizeof(void*) * 2, + "Wrong header size"); + + if (level == NMT_minimal) { + return; + } + + _flags = flags; + set_size(size); + if (level == NMT_detail) { + size_t bucket_idx; + size_t pos_idx; + if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) { + assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index"); + assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index"); + _bucket_idx = bucket_idx; + _pos_idx = pos_idx; + } + } + + MallocMemorySummary::record_malloc(size, flags); + MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader)); + } + + inline size_t size() const { return _size; } + inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; } + bool get_stack(NativeCallStack& stack) const; + + // Cleanup tracking information before the memory is released. + void release() const; + + private: + inline void set_size(size_t size) { + _size = size; + } + bool record_malloc_site(const NativeCallStack& stack, size_t size, + size_t* bucket_idx, size_t* pos_idx) const; +}; + + +// Main class called from MemTracker to track malloc activities +class MallocTracker : AllStatic { + public: + // Initialize malloc tracker for specific tracking level + static bool initialize(NMT_TrackingLevel level); + + static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to); + + // malloc tracking header size for specific tracking level + static inline size_t malloc_header_size(NMT_TrackingLevel level) { + return (level == NMT_off) ? 0 : sizeof(MallocHeader); + } + + // Parameter name convention: + // memblock : the beginning address for user data + // malloc_base: the beginning address that includes malloc tracking header + // + // The relationship: + // memblock = (char*)malloc_base + sizeof(nmt header) + // + + // Record malloc on specified memory block + static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, + const NativeCallStack& stack, NMT_TrackingLevel level); + + // Record free on specified memory block + static void* record_free(void* memblock); + + // Offset memory address to header address + static inline void* get_base(void* memblock); + static inline void* get_base(void* memblock, NMT_TrackingLevel level) { + if (memblock == NULL || level == NMT_off) return memblock; + return (char*)memblock - malloc_header_size(level); + } + + // Get memory size + static inline size_t get_size(void* memblock) { + MallocHeader* header = malloc_header(memblock); + return header->size(); + } + + // Get memory type + static inline MEMFLAGS get_flags(void* memblock) { + MallocHeader* header = malloc_header(memblock); + return header->flags(); + } + + // Get header size + static inline size_t get_header_size(void* memblock) { + return (memblock == NULL) ? 
0 : sizeof(MallocHeader); + } + + static inline void record_new_arena(MEMFLAGS flags) { + MallocMemorySummary::record_new_arena(flags); + } + + static inline void record_arena_free(MEMFLAGS flags) { + MallocMemorySummary::record_arena_free(flags); + } + + static inline void record_arena_size_change(int size, MEMFLAGS flags) { + MallocMemorySummary::record_arena_size_change(size, flags); + } + private: + static inline MallocHeader* malloc_header(void *memblock) { + assert(memblock != NULL, "NULL pointer"); + MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader)); + return header; + } +}; + +#endif // INCLUDE_NMT + + +#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/mallocTracker.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP +#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP + +#include "services/mallocTracker.hpp" +#include "services/memTracker.hpp" + +inline void* MallocTracker::get_base(void* memblock){ + return get_base(memblock, MemTracker::tracking_level()); +} + +#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP + --- ./hotspot/src/share/vm/services/management.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/management.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -39,6 +39,7 @@ #include "runtime/jniHandles.hpp" #include "runtime/os.hpp" #include "runtime/serviceThread.hpp" +#include "runtime/thread.inline.hpp" #include "services/classLoadingService.hpp" #include "services/diagnosticCommand.hpp" #include "services/diagnosticFramework.hpp" @@ -388,23 +389,6 @@ return (instanceOop) element(); } -// Helper functions -static JavaThread* find_java_thread_from_id(jlong thread_id) { - assert(Threads_lock->owned_by_self(), "Must hold Threads_lock"); - - JavaThread* java_thread = NULL; - // Sequential search for now. Need to do better optimization later. 
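// [Editor's note] A self-contained sketch of the header-prefix scheme that
// MallocTracker::record_malloc()/record_free()/get_base() (earlier in this
// patch) implement: the allocator is asked for sizeof(Header) extra bytes, the
// header is constructed at the raw base with placement new, and the caller
// receives the address just past it; free walks back by the same offset.
// Header, tracked_malloc and tracked_free are illustrative names.

#include <cstddef>
#include <cstdlib>
#include <new>

struct Header {   // two words, mirroring MallocHeader's size constraint
  size_t size;
  size_t flags;
};

static void* tracked_malloc(size_t size, size_t flags) {
  void* base = std::malloc(size + sizeof(Header));
  if (base == nullptr) return nullptr;
  ::new (base) Header{size, flags};                   // header at the raw base
  return static_cast<char*>(base) + sizeof(Header);   // user data follows it
}

static void tracked_free(void* memblock) {
  if (memblock == nullptr) return;
  std::free(static_cast<char*>(memblock) - sizeof(Header));  // back to the base
}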
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) { - oop tobj = thread->threadObj(); - if (!thread->is_exiting() && - tobj != NULL && - thread_id == java_lang_Thread::thread_id(tobj)) { - java_thread = thread; - break; - } - } - return java_thread; -} static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) { if (mgr == NULL) { @@ -441,6 +425,8 @@ return MemoryService::get_memory_pool(ph); } +#endif // INCLUDE_MANAGEMENT + static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) { int num_threads = ids_ah->length(); @@ -456,6 +442,8 @@ } } +#if INCLUDE_MANAGEMENT + static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) { // check if the element of infoArray is of type ThreadInfo class Klass* threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK); @@ -819,45 +807,6 @@ return prev; JVM_END -// Gets an array containing the amount of memory allocated on the Java -// heap for a set of threads (in bytes). Each element of the array is -// the amount of memory allocated for the thread ID specified in the -// corresponding entry in the given array of thread IDs; or -1 if the -// thread does not exist or has terminated. -JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids, - jlongArray sizeArray)) - // Check if threads is null - if (ids == NULL || sizeArray == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - - ResourceMark rm(THREAD); - typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids)); - typeArrayHandle ids_ah(THREAD, ta); - - typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray)); - typeArrayHandle sizeArray_h(THREAD, sa); - - // validate the thread id array - validate_thread_id_array(ids_ah, CHECK); - - // sizeArray must be of the same length as the given array of thread IDs - int num_threads = ids_ah->length(); - if (num_threads != sizeArray_h->length()) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "The length of the given long array does not match the length of " - "the given array of thread IDs"); - } - - MutexLockerEx ml(Threads_lock); - for (int i = 0; i < num_threads; i++) { - JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i)); - if (java_thread != NULL) { - sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes()); - } - } -JVM_END - // Returns a java/lang/management/MemoryUsage object representing // the memory usage for the heap or non-heap memory. JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap)) @@ -1163,7 +1112,7 @@ MutexLockerEx ml(Threads_lock); for (int i = 0; i < num_threads; i++) { jlong tid = ids_ah->long_at(i); - JavaThread* jt = find_java_thread_from_id(tid); + JavaThread* jt = Threads::find_java_thread_from_java_tid(tid); oop thread_obj = (jt != NULL ? 
jt->threadObj() : (oop)NULL); instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj); thread_handle_array->append(threadObj_h); @@ -1242,7 +1191,7 @@ MutexLockerEx ml(Threads_lock); for (int i = 0; i < num_threads; i++) { jlong tid = ids_ah->long_at(i); - JavaThread* jt = find_java_thread_from_id(tid); + JavaThread* jt = Threads::find_java_thread_from_java_tid(tid); ThreadSnapshot* ts; if (jt == NULL) { // if the thread does not exist or now it is terminated, @@ -1488,7 +1437,7 @@ } } else { // reset contention statistics for a given thread - JavaThread* java_thread = find_java_thread_from_id(tid); + JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid); if (java_thread == NULL) { return false; } @@ -1557,7 +1506,7 @@ return os::current_thread_cpu_time(); } else { MutexLockerEx ml(Threads_lock); - java_thread = find_java_thread_from_id(thread_id); + java_thread = Threads::find_java_thread_from_java_tid(thread_id); if (java_thread != NULL) { return os::thread_cpu_time((Thread*) java_thread); } @@ -1565,78 +1514,6 @@ return -1; JVM_END -// Returns the CPU time consumed by a given thread (in nanoseconds). -// If thread_id == 0, CPU time for the current thread is returned. -// If user_sys_cpu_time = true, user level and system CPU time of -// a given thread is returned; otherwise, only user level CPU time -// is returned. -JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time)) - if (!os::is_thread_cpu_time_supported()) { - return -1; - } - - if (thread_id < 0) { - THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), - "Invalid thread ID", -1); - } - - JavaThread* java_thread = NULL; - if (thread_id == 0) { - // current thread - return os::current_thread_cpu_time(user_sys_cpu_time != 0); - } else { - MutexLockerEx ml(Threads_lock); - java_thread = find_java_thread_from_id(thread_id); - if (java_thread != NULL) { - return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0); - } - } - return -1; -JVM_END - -// Gets an array containing the CPU times consumed by a set of threads -// (in nanoseconds). Each element of the array is the CPU time for the -// thread ID specified in the corresponding entry in the given array -// of thread IDs; or -1 if the thread does not exist or has terminated. -// If user_sys_cpu_time = true, the sum of user level and system CPU time -// for the given thread is returned; otherwise, only user level CPU time -// is returned. 
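// [Editor's note] The hunks above and below replace management.cpp's
// file-local find_java_thread_from_id() with a shared
// Threads::find_java_thread_from_java_tid(). Its body presumably matches the
// removed helper: a linear scan of the VM thread list, only valid while
// Threads_lock is held, along these lines:
//
//   JavaThread* Threads::find_java_thread_from_java_tid(jlong tid) {
//     assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
//     for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
//       oop tobj = thread->threadObj();
//       if (!thread->is_exiting() &&
//           tobj != NULL &&
//           tid == java_lang_Thread::thread_id(tobj)) {
//         return thread;
//       }
//     }
//     return NULL;
//   }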
-JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids, - jlongArray timeArray, - jboolean user_sys_cpu_time)) - // Check if threads is null - if (ids == NULL || timeArray == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - - ResourceMark rm(THREAD); - typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids)); - typeArrayHandle ids_ah(THREAD, ta); - - typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray)); - typeArrayHandle timeArray_h(THREAD, tia); - - // validate the thread id array - validate_thread_id_array(ids_ah, CHECK); - - // timeArray must be of the same length as the given array of thread IDs - int num_threads = ids_ah->length(); - if (num_threads != timeArray_h->length()) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "The length of the given long array does not match the length of " - "the given array of thread IDs"); - } - - MutexLockerEx ml(Threads_lock); - for (int i = 0; i < num_threads; i++) { - JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i)); - if (java_thread != NULL) { - timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread, - user_sys_cpu_time != 0)); - } - } -JVM_END - // Returns a String array of all VM global flag names JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env)) // last flag entry is always NULL, so subtract 1 @@ -1823,7 +1700,7 @@ "This flag is not writeable."); } - bool succeed; + bool succeed = false; if (flag->is_bool()) { bool bvalue = (new_value.z == JNI_TRUE ? true : false); succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT); @@ -1855,6 +1732,9 @@ } ccstr svalue = java_lang_String::as_utf8_string(str); succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT); + if (succeed) { + FREE_C_HEAP_ARRAY(char, svalue, mtInternal); + } } assert(succeed, "Setting flag should succeed"); JVM_END @@ -2319,7 +2199,122 @@ return (jlong)(((double)ticks / (double)os::elapsed_frequency()) * (double)1000.0); } +#endif // INCLUDE_MANAGEMENT +// Gets an array containing the amount of memory allocated on the Java +// heap for a set of threads (in bytes). Each element of the array is +// the amount of memory allocated for the thread ID specified in the +// corresponding entry in the given array of thread IDs; or -1 if the +// thread does not exist or has terminated. 
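// [Editor's note] Two small correctness fixes ride along in the hunks above:
// 'succeed' is now initialized to false before the flag-type dispatch (it was
// previously read uninitialized by the trailing assert when no branch was
// taken), and the C-heap string left in 'svalue' after ccstrAtPut() is now
// released with FREE_C_HEAP_ARRAY once the put succeeds, closing a small leak.
// The hunks below re-add jmm_GetThreadAllocatedMemory and the CPU-time entry
// points outside the #if INCLUDE_MANAGEMENT region, apparently so they remain
// available when the management framework is compiled out.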
+JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids, + jlongArray sizeArray)) + // Check if threads is null + if (ids == NULL || sizeArray == NULL) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + + ResourceMark rm(THREAD); + typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids)); + typeArrayHandle ids_ah(THREAD, ta); + + typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray)); + typeArrayHandle sizeArray_h(THREAD, sa); + + // validate the thread id array + validate_thread_id_array(ids_ah, CHECK); + + // sizeArray must be of the same length as the given array of thread IDs + int num_threads = ids_ah->length(); + if (num_threads != sizeArray_h->length()) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "The length of the given long array does not match the length of " + "the given array of thread IDs"); + } + + MutexLockerEx ml(Threads_lock); + for (int i = 0; i < num_threads; i++) { + JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i)); + if (java_thread != NULL) { + sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes()); + } + } +JVM_END + +// Returns the CPU time consumed by a given thread (in nanoseconds). +// If thread_id == 0, CPU time for the current thread is returned. +// If user_sys_cpu_time = true, user level and system CPU time of +// a given thread is returned; otherwise, only user level CPU time +// is returned. +JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time)) + if (!os::is_thread_cpu_time_supported()) { + return -1; + } + + if (thread_id < 0) { + THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), + "Invalid thread ID", -1); + } + + JavaThread* java_thread = NULL; + if (thread_id == 0) { + // current thread + return os::current_thread_cpu_time(user_sys_cpu_time != 0); + } else { + MutexLockerEx ml(Threads_lock); + java_thread = Threads::find_java_thread_from_java_tid(thread_id); + if (java_thread != NULL) { + return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0); + } + } + return -1; +JVM_END + +// Gets an array containing the CPU times consumed by a set of threads +// (in nanoseconds). Each element of the array is the CPU time for the +// thread ID specified in the corresponding entry in the given array +// of thread IDs; or -1 if the thread does not exist or has terminated. +// If user_sys_cpu_time = true, the sum of user level and system CPU time +// for the given thread is returned; otherwise, only user level CPU time +// is returned. 
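// [Editor's note] These entry points all follow the same pattern: take
// Threads_lock, resolve ids to JavaThread*, and read per-thread data while the
// list is frozen, since a resolved pointer is only safe to dereference while
// the thread cannot exit. A generic standalone analogue of the pattern
// (WorkerThread, g_registry and friends are illustrative names):

#include <map>
#include <mutex>

struct WorkerThread { long allocated_bytes; };

static std::mutex g_registry_lock;                // stands in for Threads_lock
static std::map<long, WorkerThread*> g_registry;  // stands in for the thread list

static long allocated_bytes_for(long tid) {
  std::lock_guard<std::mutex> guard(g_registry_lock);  // freeze the registry
  std::map<long, WorkerThread*>::iterator it = g_registry.find(tid);
  return (it == g_registry.end()) ? -1 : it->second->allocated_bytes;
}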
+JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids, + jlongArray timeArray, + jboolean user_sys_cpu_time)) + // Check if threads is null + if (ids == NULL || timeArray == NULL) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + + ResourceMark rm(THREAD); + typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids)); + typeArrayHandle ids_ah(THREAD, ta); + + typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray)); + typeArrayHandle timeArray_h(THREAD, tia); + + // validate the thread id array + validate_thread_id_array(ids_ah, CHECK); + + // timeArray must be of the same length as the given array of thread IDs + int num_threads = ids_ah->length(); + if (num_threads != timeArray_h->length()) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "The length of the given long array does not match the length of " + "the given array of thread IDs"); + } + + MutexLockerEx ml(Threads_lock); + for (int i = 0; i < num_threads; i++) { + JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i)); + if (java_thread != NULL) { + timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread, + user_sys_cpu_time != 0)); + } + } +JVM_END + + + +#if INCLUDE_MANAGEMENT const struct jmmInterface_1_ jmm_interface = { NULL, NULL, --- ./hotspot/src/share/vm/services/memBaseline.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memBaseline.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,471 +22,279 @@ * */ #include "precompiled.hpp" + #include "memory/allocation.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" #include "services/memBaseline.hpp" #include "services/memTracker.hpp" - -MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = { - {mtJavaHeap, "Java Heap"}, - {mtClass, "Class"}, - {mtThreadStack,"Thread Stack"}, - {mtThread, "Thread"}, - {mtCode, "Code"}, - {mtGC, "GC"}, - {mtCompiler, "Compiler"}, - {mtInternal, "Internal"}, - {mtOther, "Other"}, - {mtSymbol, "Symbol"}, - {mtNMT, "Memory Tracking"}, - {mtTracing, "Tracing"}, - {mtChunk, "Pooled Free Chunks"}, - {mtClassShared,"Shared spaces for classes"}, - {mtTest, "Test"}, - {mtNone, "Unknown"} // It can happen when type tagging records are lagging - // behind -}; - -MemBaseline::MemBaseline() { - _baselined = false; - - for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { - _malloc_data[index].set_type(MemType2NameMap[index]._flag); - _vm_data[index].set_type(MemType2NameMap[index]._flag); - _arena_data[index].set_type(MemType2NameMap[index]._flag); - } - - _malloc_cs = NULL; - _vm_cs = NULL; - _vm_map = NULL; - - _number_of_classes = 0; - _number_of_threads = 0; -} - - -void MemBaseline::clear() { - if (_malloc_cs != NULL) { - delete _malloc_cs; - _malloc_cs = NULL; - } - - if (_vm_cs != NULL) { - delete _vm_cs; - _vm_cs = NULL; - } - - if (_vm_map != NULL) { - delete _vm_map; - _vm_map = NULL; - } - - reset(); -} - - -void MemBaseline::reset() { - _baselined = false; - _total_vm_reserved = 0; - _total_vm_committed = 0; - _total_malloced = 0; - _number_of_classes = 0; - - if (_malloc_cs != NULL) _malloc_cs->clear(); - if (_vm_cs != NULL) _vm_cs->clear(); - if (_vm_map != 
NULL) _vm_map->clear();
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index].clear();
-    _vm_data[index].clear();
-    _arena_data[index].clear();
+/*
+ * Sizes are sorted in descending order for reporting
+ */
+int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
+  if (s1.size() == s2.size()) {
+    return 0;
+  } else if (s1.size() > s2.size()) {
+    return -1;
+  } else {
+    return 1;
   }
 }

-MemBaseline::~MemBaseline() {
-  clear();
+
+int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  if (s1.reserved() == s2.reserved()) {
+    return 0;
+  } else if (s1.reserved() > s2.reserved()) {
+    return -1;
+  } else {
+    return 1;
+  }
 }

-// baseline malloc'd memory records, generate overall summary and summaries by
-// memory types
-bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
-  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
-  size_t used_arena_size = 0;
-  int index;
-  while (malloc_ptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
-    size_t size = malloc_ptr->size();
-    if (malloc_ptr->is_arena_memory_record()) {
-      // We do have anonymous arenas, they are either used as value objects,
-      // which are embedded inside other objects, or used as stack objects.
-      _arena_data[index].inc(size);
-      used_arena_size += size;
+// Sort into allocation site address order for baseline comparison
+int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
+}
+
+
+int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
+}
+
+/*
+ * Walker to walk the malloc allocation site table
+ */
+class MallocAllocationSiteWalker : public MallocSiteWalker {
+ private:
+  SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites;
+  size_t _count;
+
+  // Entries in MallocSiteTable with size = 0 and count = 0 remain
+  // when the malloc site is no longer there.
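// [Editor's note] The compare_* helpers above follow the qsort-style
// convention: a negative return means s1 sorts first. Returning -1 when s1 is
// larger is what produces the descending, largest-first report order. A
// standalone illustration (Site is an illustrative name):

#include <cstddef>

struct Site { size_t size; };

static int compare_by_size_desc(const Site& a, const Site& b) {
  if (a.size == b.size) return 0;
  return (a.size > b.size) ? -1 : 1;  // larger sorts first => descending order
}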
+ public:
+  MallocAllocationSiteWalker() : _count(0) { }
+
+  inline size_t count() const { return _count; }
+
+  LinkedList<MallocSite>* malloc_sites() {
+    return &_malloc_sites;
+  }
+
+  bool do_malloc_site(const MallocSite* site) {
+    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_malloc_sites.add(*site) != NULL) {
+        _count++;
+        return true;
+      } else {
+        return false; // OOM
+      }
+    } else {
-    _total_malloced += size;
-    _malloc_data[index].inc(size);
-    if (malloc_ptr->is_arena_record()) {
-      // see if arena memory record present
-      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-      if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
-        assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
-          "Arena records do not match");
-        size = next_malloc_ptr->size();
-        _arena_data[index].inc(size);
-        used_arena_size += size;
-        malloc_itr.next();
-      }
+      // malloc site does not meet threshold, ignore and continue
+      return true;
+    }
+  }
+};
+
+// Compare virtual memory region's base address
+int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
+  return r1.compare(r2);
+}
+
+// Walk all virtual memory regions for baselining
+class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
+ private:
+  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
+    _virtual_memory_regions;
+  size_t _count;
+
+ public:
+  VirtualMemoryAllocationWalker() : _count(0) { }
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_virtual_memory_regions.add(*rgn) != NULL) {
+        _count ++;
+        return true;
+      } else {
+        return false;
+      }
+    }
-  }
-  malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
+    return true;
  }
-  // substract used arena size to get size of arena chunk in free list
-  index = flag2index(mtChunk);
-  _malloc_data[index].reduce(used_arena_size);
-  // we really don't know how many chunks in free list, so just set to
-  // 0
-  _malloc_data[index].overwrite_counter(0);
+  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
+    return &_virtual_memory_regions;
+  }
+};
+
+
+bool MemBaseline::baseline_summary() {
+  MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
+  VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
+  return true;
+}
+
+bool MemBaseline::baseline_allocation_sites() {
+  // Malloc allocation sites
+  MallocAllocationSiteWalker malloc_walker;
+  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
+    return false;
+  }
+
+  _malloc_sites.move(malloc_walker.malloc_sites());
+  // The malloc sites are collected in size order
+  _malloc_sites_order = by_size;
+
+  // Virtual memory allocation sites
+  VirtualMemoryAllocationWalker virtual_memory_walker;
+  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
+    return false;
+  }
+
+  // Virtual memory allocations are collected in call stack order
+  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
+
+  if (!aggregate_virtual_memory_allocation_sites()) {
+    return false;
+  }
+  // Virtual memory allocation sites are aggregated in call stack order
+  _virtual_memory_sites_order = by_address;
  return true;
}

-// check if there is a safepoint in progress, if so, block the thread
-// for the safepoint
-void MemBaseline::check_safepoint(JavaThread* thr) {
-  if (SafepointSynchronize::is_synchronizing()) {
-    // grab and drop the SR_lock to honor the safepoint protocol
-    MutexLocker ml(thr->SR_lock());
-  }
-}
+bool MemBaseline::baseline(bool summaryOnly) {
+  reset();

-// baseline mmap'd memory
records, generate overall summary and summaries by -// memory types -bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) { - MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records); - VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current(); - int index; - while (vm_ptr != NULL) { - if (vm_ptr->is_reserved_region()) { - index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags())); - // we use the number of thread stack to count threads - if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) { - _number_of_threads ++; - } - _total_vm_reserved += vm_ptr->size(); - _vm_data[index].inc(vm_ptr->size(), 0); - } else { - _total_vm_committed += vm_ptr->size(); - _vm_data[index].inc(0, vm_ptr->size()); - } - vm_ptr = (VMMemRegion*)vm_itr.next(); - } - return true; -} + _class_count = InstanceKlass::number_of_instance_classes(); -// baseline malloc'd memory by callsites, but only the callsites with memory allocation -// over 1KB are stored. -bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) { - assert(MemTracker::track_callsite(), "detail tracking is off"); - - MemPointerArrayIteratorImpl malloc_itr(const_cast(malloc_records)); - MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current(); - MallocCallsitePointer malloc_callsite; - - // initailize malloc callsite array - if (_malloc_cs == NULL) { - _malloc_cs = new (std::nothrow) MemPointerArrayImpl(64); - // out of native memory - if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) { - return false; - } - } else { - _malloc_cs->clear(); - } - - MemPointerArray* malloc_data = const_cast(malloc_records); - - // sort into callsite pc order. Details are aggregated by callsites - malloc_data->sort((FN_SORT)malloc_sort_by_pc); - bool ret = true; - - // baseline memory that is totaled over 1 KB - while (malloc_ptr != NULL) { - if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) { - // skip thread stacks - if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) { - if (malloc_callsite.addr() != malloc_ptr->pc()) { - if ((malloc_callsite.amount()/K) > 0) { - if (!_malloc_cs->append(&malloc_callsite)) { - ret = false; - break; - } - } - malloc_callsite = MallocCallsitePointer(malloc_ptr->pc()); - } - malloc_callsite.inc(malloc_ptr->size()); - } - } - malloc_ptr = (MemPointerRecordEx*)malloc_itr.next(); - } - - // restore to address order. Snapshot malloc data is maintained in memory - // address order. 
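// [Editor's note] Both the removed per-callsite baselining above (which kept
// only callsites totaling over 1 KB) and the new walkers earlier in this file
// filter on MemBaseline::SIZE_THRESHOLD. A standalone sketch of the walker
// contract: return true to keep walking, false to abort (e.g. on OOM);
// below-threshold sites are skipped but the walk continues. AllocSite and the
// fixed-capacity sink are illustrative.

#include <cstddef>

struct AllocSite { size_t size; };

static const size_t kThreshold = 1024;  // stands in for MemBaseline::SIZE_THRESHOLD

static bool visit_site(const AllocSite& s, AllocSite* out, size_t cap, size_t* out_len) {
  if (s.size < kThreshold) return true;  // below threshold: skip, keep walking
  if (*out_len == cap) return false;     // sink full: abort the walk
  out[(*out_len)++] = s;
  return true;
}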
- malloc_data->sort((FN_SORT)malloc_sort_by_addr); - - if (!ret) { - return false; - } - // deal with last record - if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) { - if (!_malloc_cs->append(&malloc_callsite)) { - return false; - } - } - return true; -} - -// baseline mmap'd memory by callsites -bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) { - assert(MemTracker::track_callsite(), "detail tracking is off"); - - VMCallsitePointer vm_callsite; - VMCallsitePointer* cur_callsite = NULL; - MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records); - VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current(); - - // initialize virtual memory map array - if (_vm_map == NULL) { - _vm_map = new (std::nothrow) MemPointerArrayImpl(vm_records->length()); - if (_vm_map == NULL || _vm_map->out_of_memory()) { - return false; - } - } else { - _vm_map->clear(); - } - - // initialize virtual memory callsite array - if (_vm_cs == NULL) { - _vm_cs = new (std::nothrow) MemPointerArrayImpl(64); - if (_vm_cs == NULL || _vm_cs->out_of_memory()) { - return false; - } - } else { - _vm_cs->clear(); - } - - // consolidate virtual memory data - VMMemRegionEx* reserved_rec = NULL; - VMMemRegionEx* committed_rec = NULL; - - // vm_ptr is coming in increasing base address order - while (vm_ptr != NULL) { - if (vm_ptr->is_reserved_region()) { - // consolidate reserved memory regions for virtual memory map. - // The criteria for consolidation is: - // 1. two adjacent reserved memory regions - // 2. belong to the same memory type - // 3. reserved from the same callsite - if (reserved_rec == NULL || - reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() || - FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) || - reserved_rec->pc() != vm_ptr->pc()) { - if (!_vm_map->append(vm_ptr)) { - return false; - } - // inserted reserved region, we need the pointer to the element in virtual - // memory map array. - reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1); - } else { - reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size()); - } - - if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) { - return false; - } - vm_callsite = VMCallsitePointer(vm_ptr->pc()); - cur_callsite = &vm_callsite; - vm_callsite.inc(vm_ptr->size(), 0); - } else { - // consolidate committed memory regions for virtual memory map - // The criterial is: - // 1. two adjacent committed memory regions - // 2. committed from the same callsite - if (committed_rec == NULL || - committed_rec->base() + committed_rec->size() != vm_ptr->addr() || - committed_rec->pc() != vm_ptr->pc()) { - if (!_vm_map->append(vm_ptr)) { - return false; - } - committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1); - } else { - committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size()); - } - vm_callsite.inc(0, vm_ptr->size()); - } - vm_ptr = (VMMemRegionEx*)vm_itr.next(); - } - // deal with last record - if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) { + if (!baseline_summary()) { return false; } - // sort it into callsite pc order. 
Details are aggregated by callsites - _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc); + _baseline_type = Summary_baselined; - // walk the array to consolidate record by pc - MemPointerArrayIteratorImpl itr(_vm_cs); - VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current(); - VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next(); - while (next_rec != NULL) { - assert(callsite_rec != NULL, "Sanity check"); - if (next_rec->addr() == callsite_rec->addr()) { - callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount()); - itr.remove(); - next_rec = (VMCallsitePointer*)itr.current(); - } else { - callsite_rec = next_rec; - next_rec = (VMCallsitePointer*)itr.next(); - } + // baseline details + if (!summaryOnly && + MemTracker::tracking_level() == NMT_detail) { + baseline_allocation_sites(); + _baseline_type = Detail_baselined; } return true; } -// baseline a snapshot. If summary_only = false, memory usages aggregated by -// callsites are also baselined. -// The method call can be lengthy, especially when detail tracking info is -// requested. So the method checks for safepoint explicitly. -bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) { - Thread* THREAD = Thread::current(); - assert(THREAD->is_Java_thread(), "must be a JavaThread"); - MutexLocker snapshot_locker(snapshot._lock); - reset(); - _baselined = baseline_malloc_summary(snapshot._alloc_ptrs); - if (_baselined) { - check_safepoint((JavaThread*)THREAD); - _baselined = baseline_vm_summary(snapshot._vm_ptrs); - } - _number_of_classes = snapshot.number_of_classes(); - - if (!summary_only && MemTracker::track_callsite() && _baselined) { - check_safepoint((JavaThread*)THREAD); - _baselined = baseline_malloc_details(snapshot._alloc_ptrs); - if (_baselined) { - check_safepoint((JavaThread*)THREAD); - _baselined = baseline_vm_details(snapshot._vm_ptrs); - } - } - return _baselined; +int compare_allocation_site(const VirtualMemoryAllocationSite& s1, + const VirtualMemoryAllocationSite& s2) { + return s1.call_stack()->compare(*s2.call_stack()); } +bool MemBaseline::aggregate_virtual_memory_allocation_sites() { + SortedLinkedList allocation_sites; -int MemBaseline::flag2index(MEMFLAGS flag) const { - for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { - if (MemType2NameMap[index]._flag == flag) { - return index; + VirtualMemoryAllocationIterator itr = virtual_memory_allocations(); + const ReservedMemoryRegion* rgn; + VirtualMemoryAllocationSite* site; + while ((rgn = itr.next()) != NULL) { + VirtualMemoryAllocationSite tmp(*rgn->call_stack()); + site = allocation_sites.find(tmp); + if (site == NULL) { + LinkedListNode* node = + allocation_sites.add(tmp); + if (node == NULL) return false; + site = node->data(); } + site->reserve_memory(rgn->size()); + site->commit_memory(rgn->committed_size()); } - assert(false, "no type"); - return -1; + + _virtual_memory_sites.move(&allocation_sites); + return true; } -const char* MemBaseline::type2name(MEMFLAGS type) { - for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { - if (MemType2NameMap[index]._flag == type) { - return MemType2NameMap[index]._name; - } +MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) { + assert(!_malloc_sites.is_empty(), "Not detail baseline"); + switch(order) { + case by_size: + malloc_sites_to_size_order(); + break; + case by_site: + malloc_sites_to_allocation_site_order(); + break; + case by_address: + default: + ShouldNotReachHere(); } - assert(false, err_msg("bad type %x", type)); - return NULL; + 
 return MallocSiteIterator(_malloc_sites.head());
}

-MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
-  _total_malloced = other._total_malloced;
-  _total_vm_reserved = other._total_vm_reserved;
-  _total_vm_committed = other._total_vm_committed;
-
-  _baselined = other._baselined;
-  _number_of_classes = other._number_of_classes;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index] = other._malloc_data[index];
-    _vm_data[index] = other._vm_data[index];
-    _arena_data[index] = other._arena_data[index];
+VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
+  assert(!_virtual_memory_sites.is_empty(), "Not detail baseline");
+  switch(order) {
+  case by_size:
+    virtual_memory_sites_to_size_order();
+    break;
+  case by_site:
+    virtual_memory_sites_to_reservation_site_order();
+    break;
+  case by_address:
+  default:
+    ShouldNotReachHere();
  }
-
-  if (MemTracker::track_callsite()) {
-    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
-    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
-      "not properly baselined");
-    _malloc_cs->clear();
-    _vm_cs->clear();
-    int index;
-    for (index = 0; index < other._malloc_cs->length(); index ++) {
-      _malloc_cs->append(other._malloc_cs->at(index));
-    }
-
-    for (index = 0; index < other._vm_cs->length(); index ++) {
-      _vm_cs->append(other._vm_cs->at(index));
-    }
-  }
-  return *this;
+  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
}

-/* compare functions for sorting */
-// sort snapshot malloc'd records in callsite pc order
-int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
-  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+// Sorting allocation sites in different orders
+void MemBaseline::malloc_sites_to_size_order() {
+  if (_malloc_sites_order != by_size) {
+    SortedLinkedList<MallocSite, compare_malloc_size> tmp;
+
+    // Add malloc sites to sorted linked list to sort into size order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_size;
+  }
}

-// sort baselined malloc'd records in size order
-int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
+void MemBaseline::malloc_sites_to_allocation_site_order() {
+  if (_malloc_sites_order != by_site) {
+    SortedLinkedList<MallocSite, compare_malloc_site> tmp;
+    // Add malloc sites to sorted linked list to sort into site (address) order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_site;
+  }
}

-// sort baselined malloc'd records in callsite pc order
-int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+void MemBaseline::virtual_memory_sites_to_size_order() {
+  if (_virtual_memory_sites_order != by_size) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp;
+
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _virtual_memory_sites_order = by_size;
+  }
 }

+void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
+  if (_virtual_memory_sites_order != by_site) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp;
-// sort baselined mmap'd records in size (reserved size) order
-int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+
+    _virtual_memory_sites_order = by_site;
+  }
 }

-// sort baselined mmap'd records in callsite pc order
-int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-}
-
-
-// sort snapshot malloc'd records in memory block address order
-int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
-  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(p1 == p2 || delta != 0, "dup pointer");
-  return delta;
-}
-
--- ./hotspot/src/share/vm/services/memBaseline.hpp	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/services/memBaseline.hpp	Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,425 +25,176 @@
 #ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
 #define SHARE_VM_SERVICES_MEM_BASELINE_HPP
 
+#if INCLUDE_NMT
+
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
-#include "services/memPtr.hpp"
-#include "services/memSnapshot.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/nmtCommon.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/linkedlist.hpp"
 
-// compare unsigned number
-#define UNSIGNED_COMPARE(a, b) ((a > b) ? 1 : ((a == b) ? 0 : -1))
+typedef LinkedListIterator<MallocSite>                  MallocSiteIterator;
+typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
+typedef LinkedListIterator<ReservedMemoryRegion>        VirtualMemoryAllocationIterator;
 
 /*
- * MallocCallsitePointer and VMCallsitePointer are used
- * to baseline memory blocks with their callsite information.
- * They are only available when detail tracking is turned
- * on.
+ * Baseline a memory snapshot */ - -/* baselined malloc record aggregated by callsite */ -class MallocCallsitePointer : public MemPointer { - private: - size_t _count; // number of malloc invocation from this callsite - size_t _amount; // total amount of memory malloc-ed from this callsite - +class MemBaseline VALUE_OBJ_CLASS_SPEC { public: - MallocCallsitePointer() { - _count = 0; - _amount = 0; - } - - MallocCallsitePointer(address pc) : MemPointer(pc) { - _count = 0; - _amount = 0; - } - - MallocCallsitePointer& operator=(const MallocCallsitePointer& p) { - MemPointer::operator=(p); - _count = p.count(); - _amount = p.amount(); - return *this; - } - - inline void inc(size_t size) { - _count ++; - _amount += size; + enum BaselineThreshold { + SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined. }; - inline size_t count() const { - return _count; - } + enum BaselineType { + Not_baselined, + Summary_baselined, + Detail_baselined + }; - inline size_t amount() const { - return _amount; - } -}; - -// baselined virtual memory record aggregated by callsite -class VMCallsitePointer : public MemPointer { - private: - size_t _count; // number of invocation from this callsite - size_t _reserved_amount; // total reserved amount - size_t _committed_amount; // total committed amount - - public: - VMCallsitePointer() { - _count = 0; - _reserved_amount = 0; - _committed_amount = 0; - } - - VMCallsitePointer(address pc) : MemPointer(pc) { - _count = 0; - _reserved_amount = 0; - _committed_amount = 0; - } - - VMCallsitePointer& operator=(const VMCallsitePointer& p) { - MemPointer::operator=(p); - _count = p.count(); - _reserved_amount = p.reserved_amount(); - _committed_amount = p.committed_amount(); - return *this; - } - - inline void inc(size_t reserved, size_t committed) { - _count ++; - _reserved_amount += reserved; - _committed_amount += committed; - } - - inline size_t count() const { - return _count; - } - - inline size_t reserved_amount() const { - return _reserved_amount; - } - - inline size_t committed_amount() const { - return _committed_amount; - } -}; - -// maps a memory type flag to readable name -typedef struct _memType2Name { - MEMFLAGS _flag; - const char* _name; -} MemType2Name; - - -// This class aggregates malloc'd records by memory type -class MallocMem VALUE_OBJ_CLASS_SPEC { - private: - MEMFLAGS _type; - - size_t _count; - size_t _amount; - - public: - MallocMem() { - _type = mtNone; - _count = 0; - _amount = 0; - } - - MallocMem(MEMFLAGS flags) { - assert(HAS_VALID_MEMORY_TYPE(flags), "no type"); - _type = FLAGS_TO_MEMORY_TYPE(flags); - _count = 0; - _amount = 0; - } - - inline void set_type(MEMFLAGS flag) { - _type = flag; - } - - inline void clear() { - _count = 0; - _amount = 0; - _type = mtNone; - } - - MallocMem& operator=(const MallocMem& m) { - assert(_type == m.type(), "different type"); - _count = m.count(); - _amount = m.amount(); - return *this; - } - - inline void inc(size_t amt) { - _amount += amt; - _count ++; - } - - inline void reduce(size_t amt) { - assert(_amount >= amt, "Just check"); - _amount -= amt; - } - - inline void overwrite_counter(size_t count) { - _count = count; - } - - inline MEMFLAGS type() const { - return _type; - } - - inline bool is_type(MEMFLAGS flags) const { - return FLAGS_TO_MEMORY_TYPE(flags) == _type; - } - - inline size_t count() const { - return _count; - } - - inline size_t amount() const { - return _amount; - } -}; - -// This class records live arena's memory usage -class ArenaMem : public MallocMem { - 
public: - ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) { - } - ArenaMem() { } -}; - -// This class aggregates virtual memory by its memory type -class VMMem VALUE_OBJ_CLASS_SPEC { - private: - MEMFLAGS _type; - - size_t _count; - size_t _reserved_amount; - size_t _committed_amount; - - public: - VMMem() { - _type = mtNone; - _count = 0; - _reserved_amount = 0; - _committed_amount = 0; - } - - VMMem(MEMFLAGS flags) { - assert(HAS_VALID_MEMORY_TYPE(flags), "no type"); - _type = FLAGS_TO_MEMORY_TYPE(flags); - _count = 0; - _reserved_amount = 0; - _committed_amount = 0; - } - - inline void clear() { - _type = mtNone; - _count = 0; - _reserved_amount = 0; - _committed_amount = 0; - } - - inline void set_type(MEMFLAGS flags) { - _type = FLAGS_TO_MEMORY_TYPE(flags); - } - - VMMem& operator=(const VMMem& m) { - assert(_type == m.type(), "different type"); - - _count = m.count(); - _reserved_amount = m.reserved_amount(); - _committed_amount = m.committed_amount(); - return *this; - } - - - inline MEMFLAGS type() const { - return _type; - } - - inline bool is_type(MEMFLAGS flags) const { - return FLAGS_TO_MEMORY_TYPE(flags) == _type; - } - - inline void inc(size_t reserved_amt, size_t committed_amt) { - _reserved_amount += reserved_amt; - _committed_amount += committed_amt; - _count ++; - } - - inline size_t count() const { - return _count; - } - - inline size_t reserved_amount() const { - return _reserved_amount; - } - - inline size_t committed_amount() const { - return _committed_amount; - } -}; - - - -#define NUMBER_OF_MEMORY_TYPE (mt_number_of_types + 1) - -class BaselineReporter; -class BaselineComparisonReporter; - -/* - * This class baselines current memory snapshot. - * A memory baseline summarizes memory usage by memory type, - * aggregates memory usage by callsites when detail tracking - * is on. - */ -class MemBaseline VALUE_OBJ_CLASS_SPEC { - friend class BaselineReporter; - friend class BaselineComparisonReporter; + enum SortingOrder { + by_address, // by memory address + by_size, // by memory size + by_site // by call site where the memory is allocated from + }; private: - // overall summaries - size_t _total_malloced; - size_t _total_vm_reserved; - size_t _total_vm_committed; - size_t _number_of_classes; - size_t _number_of_threads; + // Summary information + MallocMemorySnapshot _malloc_memory_snapshot; + VirtualMemorySnapshot _virtual_memory_snapshot; - // if it has properly baselined - bool _baselined; + size_t _class_count; - // we categorize memory into three categories within the memory type - MallocMem _malloc_data[NUMBER_OF_MEMORY_TYPE]; - VMMem _vm_data[NUMBER_OF_MEMORY_TYPE]; - ArenaMem _arena_data[NUMBER_OF_MEMORY_TYPE]; + // Allocation sites information + // Malloc allocation sites + LinkedListImpl<MallocSite> _malloc_sites; - // memory records that aggregate memory usage by callsites. - // only available when detail tracking is on.
- MemPointerArray* _malloc_cs; - MemPointerArray* _vm_cs; - // virtual memory map - MemPointerArray* _vm_map; + // All virtual memory allocations + LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations; - private: - static MemType2Name MemType2NameMap[NUMBER_OF_MEMORY_TYPE]; + // Virtual memory allocations by allocation sites, always in by_address + // order + LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites; - private: - // should not use copy constructor - MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); } + SortingOrder _malloc_sites_order; + SortingOrder _virtual_memory_sites_order; - // check and block at a safepoint - static inline void check_safepoint(JavaThread* thr); + BaselineType _baseline_type; public: // create a memory baseline - MemBaseline(); - - ~MemBaseline(); - - inline bool baselined() const { - return _baselined; + MemBaseline(): + _baseline_type(Not_baselined), + _class_count(0) { } - MemBaseline& operator=(const MemBaseline& other); + bool baseline(bool summaryOnly = true); + + BaselineType baseline_type() const { return _baseline_type; } + + MallocMemorySnapshot* malloc_memory_snapshot() { + return &_malloc_memory_snapshot; + } + + VirtualMemorySnapshot* virtual_memory_snapshot() { + return &_virtual_memory_snapshot; + } + + MallocSiteIterator malloc_sites(SortingOrder order); + VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order); + + // Virtual memory allocation iterator always returns in virtual memory + // base address order. + VirtualMemoryAllocationIterator virtual_memory_allocations() { + assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline"); + return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head()); + } + + // Total reserved memory = total malloc'd memory + total reserved virtual + // memory + size_t total_reserved_memory() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + size_t amount = _malloc_memory_snapshot.total() + + _virtual_memory_snapshot.total_reserved(); + return amount; + } + + // Total committed memory = total malloc'd memory + total committed + // virtual memory + size_t total_committed_memory() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + size_t amount = _malloc_memory_snapshot.total() + + _virtual_memory_snapshot.total_committed(); + return amount; + } + + size_t total_arena_memory() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + return _malloc_memory_snapshot.total_arena(); + } + + size_t malloc_tracking_overhead() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + MemBaseline* bl = const_cast<MemBaseline*>(this); + return bl->_malloc_memory_snapshot.malloc_overhead()->size(); + } + + MallocMemory* malloc_memory(MEMFLAGS flag) { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + return _malloc_memory_snapshot.by_type(flag); + } + + VirtualMemory* virtual_memory(MEMFLAGS flag) { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + return _virtual_memory_snapshot.by_type(flag); + } + + + size_t class_count() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + return _class_count; + } + + size_t thread_count() const { + assert(baseline_type() != Not_baselined, "Not yet baselined"); + return _malloc_memory_snapshot.thread_count(); + } // reset the baseline for reuse - void clear(); + void reset() { + _baseline_type = Not_baselined; + // _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
+ _class_count = 0; - // baseline the snapshot - bool baseline(MemSnapshot& snapshot, bool summary_only = true); - - bool baseline(const MemPointerArray* malloc_records, - const MemPointerArray* vm_records, - bool summary_only = true); - - // total malloc'd memory of specified memory type - inline size_t malloc_amount(MEMFLAGS flag) const { - return _malloc_data[flag2index(flag)].amount(); - } - // number of malloc'd memory blocks of specified memory type - inline size_t malloc_count(MEMFLAGS flag) const { - return _malloc_data[flag2index(flag)].count(); - } - // total memory used by arenas of specified memory type - inline size_t arena_amount(MEMFLAGS flag) const { - return _arena_data[flag2index(flag)].amount(); - } - // number of arenas of specified memory type - inline size_t arena_count(MEMFLAGS flag) const { - return _arena_data[flag2index(flag)].count(); - } - // total reserved memory of specified memory type - inline size_t reserved_amount(MEMFLAGS flag) const { - return _vm_data[flag2index(flag)].reserved_amount(); - } - // total committed memory of specified memory type - inline size_t committed_amount(MEMFLAGS flag) const { - return _vm_data[flag2index(flag)].committed_amount(); - } - // total memory (malloc'd + mmap'd + arena) of specified - // memory type - inline size_t total_amount(MEMFLAGS flag) const { - int index = flag2index(flag); - return _malloc_data[index].amount() + - _vm_data[index].reserved_amount() + - _arena_data[index].amount(); + _malloc_sites.clear(); + _virtual_memory_sites.clear(); + _virtual_memory_allocations.clear(); } - /* overall summaries */ + private: + // Baseline summary information + bool baseline_summary(); - // total malloc'd memory in snapshot - inline size_t total_malloc_amount() const { - return _total_malloced; - } - // total mmap'd memory in snapshot - inline size_t total_reserved_amount() const { - return _total_vm_reserved; - } - // total committed memory in snapshot - inline size_t total_committed_amount() const { - return _total_vm_committed; - } - // number of loaded classes - inline size_t number_of_classes() const { - return _number_of_classes; - } - // number of running threads - inline size_t number_of_threads() const { - return _number_of_threads; - } - // lookup human readable name of a memory type - static const char* type2name(MEMFLAGS type); + // Baseline allocation sites (detail tracking only) + bool baseline_allocation_sites(); - private: - // convert memory flag to the index to mapping table - int flag2index(MEMFLAGS flag) const; + // Aggregate virtual memory allocation by allocation sites + bool aggregate_virtual_memory_allocation_sites(); - // reset baseline values - void reset(); + // Sorting allocation sites in different orders + // Sort allocation sites in size order + void malloc_sites_to_size_order(); + // Sort allocation sites in call site address order + void malloc_sites_to_allocation_site_order(); - // summarize the records in global snapshot - bool baseline_malloc_summary(const MemPointerArray* malloc_records); - bool baseline_vm_summary(const MemPointerArray* vm_records); - bool baseline_malloc_details(const MemPointerArray* malloc_records); - bool baseline_vm_details(const MemPointerArray* vm_records); - - // print a line of malloc'd memory aggregated by callsite - void print_malloc_callsite(outputStream* st, address pc, size_t size, - size_t count, int diff_amt, int diff_count) const; - // print a line of mmap'd memory aggregated by callsite - void print_vm_callsite(outputStream* st, address pc, size_t rsz, - 
size_t csz, int diff_rsz, int diff_csz) const; - - // sorting functions for raw records - static int malloc_sort_by_pc(const void* p1, const void* p2); - static int malloc_sort_by_addr(const void* p1, const void* p2); - - private: - // sorting functions for baselined records - static int bl_malloc_sort_by_size(const void* p1, const void* p2); - static int bl_vm_sort_by_size(const void* p1, const void* p2); - static int bl_malloc_sort_by_pc(const void* p1, const void* p2); - static int bl_vm_sort_by_pc(const void* p1, const void* p2); + // Sort allocation sites in reserved size order + void virtual_memory_sites_to_size_order(); + // Sort allocation sites in call site address order + void virtual_memory_sites_to_reservation_site_order(); }; +#endif // INCLUDE_NMT #endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP --- ./hotspot/src/share/vm/services/memPtr.cpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "services/memPtr.hpp" -#include "services/memTracker.hpp" - -volatile jint SequenceGenerator::_seq_number = 1; -volatile unsigned long SequenceGenerator::_generation = 1; -NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;) - -jint SequenceGenerator::next() { - jint seq = Atomic::add(1, &_seq_number); - if (seq < 0) { - MemTracker::shutdown(MemTracker::NMT_sequence_overflow); - } else { - NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;) - } - return seq; -} - --- ./hotspot/src/share/vm/services/memPtr.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,510 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP -#define SHARE_VM_SERVICES_MEM_PTR_HPP - -#include "memory/allocation.hpp" -#include "runtime/atomic.hpp" -#include "runtime/os.hpp" -#include "runtime/safepoint.hpp" - -/* - * global sequence generator that generates sequence numbers to serialize - * memory records. - */ -class SequenceGenerator : AllStatic { - public: - static jint next(); - - // peek last sequence number - static jint peek() { - return _seq_number; - } - - // reset sequence number - static void reset() { - assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); - _seq_number = 1; - _generation ++; - }; - - static unsigned long current_generation() { return _generation; } - NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; }) - - private: - static volatile jint _seq_number; - static volatile unsigned long _generation; - NOT_PRODUCT(static jint _max_seq_number; ) -}; - -/* - * followings are the classes that are used to hold memory activity records in different stages. - * MemPointer - * |--------MemPointerRecord - * | - * |----MemPointerRecordEx - * | | - * | |-------SeqMemPointerRecordEx - * | - * |----SeqMemPointerRecord - * | - * |----VMMemRegion - * | - * |-----VMMemRegionEx - * - * - * prefix 'Seq' - sequenced, the record contains a sequence number - * surfix 'Ex' - extension, the record contains a caller's pc - * - * per-thread recorder : SeqMemPointerRecord(Ex) - * snapshot staging : SeqMemPointerRecord(Ex) - * snapshot : MemPointerRecord(Ex) and VMMemRegion(Ex) - * - */ - -/* - * class that wraps an address to a memory block, - * the memory pointer either points to a malloc'd - * memory block, or a mmap'd memory block - */ -class MemPointer VALUE_OBJ_CLASS_SPEC { - public: - MemPointer(): _addr(0) { } - MemPointer(address addr): _addr(addr) { } - - MemPointer(const MemPointer& copy_from) { - _addr = copy_from.addr(); - } - - inline address addr() const { - return _addr; - } - - inline operator address() const { - return addr(); - } - - inline bool operator == (const MemPointer& other) const { - return addr() == other.addr(); - } - - inline MemPointer& operator = (const MemPointer& other) { - _addr = other.addr(); - return *this; - } - - protected: - inline void set_addr(address addr) { _addr = addr; } - - protected: - // memory address - address _addr; -}; - -/* MemPointerRecord records an activityand associated - * attributes on a memory block. - */ -class MemPointerRecord : public MemPointer { - private: - MEMFLAGS _flags; - size_t _size; - -public: - /* extension of MemoryType enum - * see share/vm/memory/allocation.hpp for details. - * - * The tag values are associated to sorting orders, so be - * careful if changes are needed. 
- * The allocation records should be sorted ahead of tagging - * records, which in turn ahead of deallocation records - */ - enum MemPointerTags { - tag_alloc = 0x0001, // malloc or reserve record - tag_commit = 0x0002, // commit record - tag_type = 0x0003, // tag virtual memory to a memory type - tag_uncommit = 0x0004, // uncommit record - tag_release = 0x0005, // free or release record - tag_size = 0x0006, // arena size - tag_masks = 0x0007, // all tag bits - vmBit = 0x0008 - }; - - /* helper functions to interpret the tagging flags */ - - inline static bool is_allocation_record(MEMFLAGS flags) { - return (flags & tag_masks) == tag_alloc; - } - - inline static bool is_deallocation_record(MEMFLAGS flags) { - return (flags & tag_masks) == tag_release; - } - - inline static bool is_arena_record(MEMFLAGS flags) { - return (flags & (otArena | tag_size)) == otArena; - } - - inline static bool is_arena_memory_record(MEMFLAGS flags) { - return (flags & (otArena | tag_size)) == (otArena | tag_size); - } - - inline static bool is_virtual_memory_record(MEMFLAGS flags) { - return (flags & vmBit) != 0; - } - - inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) { - return (flags & 0x0F) == (tag_alloc | vmBit); - } - - inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) { - return (flags & 0x0F) == (tag_commit | vmBit); - } - - inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) { - return (flags & 0x0F) == (tag_uncommit | vmBit); - } - - inline static bool is_virtual_memory_release_record(MEMFLAGS flags) { - return (flags & 0x0F) == (tag_release | vmBit); - } - - inline static bool is_virtual_memory_type_record(MEMFLAGS flags) { - return (flags & 0x0F) == (tag_type | vmBit); - } - - /* tagging flags */ - inline static MEMFLAGS malloc_tag() { return tag_alloc; } - inline static MEMFLAGS free_tag() { return tag_release; } - inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; } - inline static MEMFLAGS virtual_memory_tag() { return vmBit; } - inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); } - inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); } - inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); } - inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); } - inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); } - - public: - MemPointerRecord(): _size(0), _flags(mtNone) { } - - MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0): - MemPointer(addr), _flags(memflags), _size(size) { } - - MemPointerRecord(const MemPointerRecord& copy_from): - MemPointer(copy_from), _flags(copy_from.flags()), - _size(copy_from.size()) { - } - - /* MemPointerRecord is not sequenced, it always return - * 0 to indicate non-sequenced - */ - virtual jint seq() const { return 0; } - - inline size_t size() const { return _size; } - inline void set_size(size_t size) { _size = size; } - - inline MEMFLAGS flags() const { return _flags; } - inline void set_flags(MEMFLAGS flags) { _flags = flags; } - - MemPointerRecord& operator= (const MemPointerRecord& ptr) { - MemPointer::operator=(ptr); - _flags = ptr.flags(); -#ifdef ASSERT - if (IS_ARENA_OBJ(_flags)) { - assert(!is_vm_pointer(), "wrong flags"); - assert((_flags & ot_masks) == otArena, "wrong flags"); - } -#endif - _size = ptr.size(); - return *this; - } - - // if the pointer represents a malloc-ed memory address - inline bool is_malloced_pointer() 
const { - return !is_vm_pointer(); - } - - // if the pointer represents a virtual memory address - inline bool is_vm_pointer() const { - return is_virtual_memory_record(_flags); - } - - // if this record records a 'malloc' or virtual memory - // 'reserve' call - inline bool is_allocation_record() const { - return is_allocation_record(_flags); - } - - // if this record records a size information of an arena - inline bool is_arena_memory_record() const { - return is_arena_memory_record(_flags); - } - - // if this pointer represents an address to an arena object - inline bool is_arena_record() const { - return is_arena_record(_flags); - } - - // if this record represents a size information of specific arena - inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) { - assert(is_arena_memory_record(), "not size record"); - assert(arena_rc->is_arena_record(), "not arena record"); - return (arena_rc->addr() + sizeof(void*)) == addr(); - } - - // if this record records a 'free' or virtual memory 'free' call - inline bool is_deallocation_record() const { - return is_deallocation_record(_flags); - } - - // if this record records a virtual memory 'commit' call - inline bool is_commit_record() const { - return is_virtual_memory_commit_record(_flags); - } - - // if this record records a virtual memory 'uncommit' call - inline bool is_uncommit_record() const { - return is_virtual_memory_uncommit_record(_flags); - } - - // if this record is a tagging record of a virtual memory block - inline bool is_type_tagging_record() const { - return is_virtual_memory_type_record(_flags); - } - - // if the two memory pointer records actually represent the same - // memory block - inline bool is_same_region(const MemPointerRecord* other) const { - return (addr() == other->addr() && size() == other->size()); - } - - // if this memory region fully contains another one - inline bool contains_region(const MemPointerRecord* other) const { - return contains_region(other->addr(), other->size()); - } - - // if this memory region fully contains specified memory range - inline bool contains_region(address add, size_t sz) const { - return (addr() <= add && addr() + size() >= add + sz); - } - - inline bool contains_address(address add) const { - return (addr() <= add && addr() + size() > add); - } - - // if this memory region overlaps another region - inline bool overlaps_region(const MemPointerRecord* other) const { - assert(other != NULL, "Just check"); - assert(size() > 0 && other->size() > 0, "empty range"); - return contains_address(other->addr()) || - contains_address(other->addr() + other->size() - 1) || // exclude end address - other->contains_address(addr()) || - other->contains_address(addr() + size() - 1); // exclude end address - } - -}; - -// MemPointerRecordEx also records callsite pc, from where -// the memory block is allocated -class MemPointerRecordEx : public MemPointerRecord { - private: - address _pc; // callsite pc - - public: - MemPointerRecordEx(): _pc(0) { } - - MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0): - MemPointerRecord(addr, memflags, size), _pc(pc) {} - - MemPointerRecordEx(const MemPointerRecordEx& copy_from): - MemPointerRecord(copy_from), _pc(copy_from.pc()) {} - - inline address pc() const { return _pc; } - - void init(const MemPointerRecordEx* mpe) { - MemPointerRecord::operator=(*mpe); - _pc = mpe->pc(); - } - - void init(const MemPointerRecord* mp) { - MemPointerRecord::operator=(*mp); - _pc = 0; - } -}; - -// a virtual memory region. 
The region can represent a reserved -// virtual memory region or a committed memory region -class VMMemRegion : public MemPointerRecord { -public: - VMMemRegion() { } - - void init(const MemPointerRecord* mp) { - assert(mp->is_vm_pointer(), "Sanity check"); - _addr = mp->addr(); - set_size(mp->size()); - set_flags(mp->flags()); - } - - VMMemRegion& operator=(const VMMemRegion& other) { - MemPointerRecord::operator=(other); - return *this; - } - - inline bool is_reserved_region() const { - return is_allocation_record(); - } - - inline bool is_committed_region() const { - return is_commit_record(); - } - - /* base address of this virtual memory range */ - inline address base() const { - return addr(); - } - - /* tag this virtual memory range to the specified memory type */ - inline void tag(MEMFLAGS f) { - set_flags(flags() | (f & mt_masks)); - } - - // expand this region to also cover specified range. - // The range has to be on either end of the memory region. - void expand_region(address addr, size_t sz) { - if (addr < base()) { - assert(addr + sz == base(), "Sanity check"); - _addr = addr; - set_size(size() + sz); - } else { - assert(base() + size() == addr, "Sanity check"); - set_size(size() + sz); - } - } - - // exclude the specified address range from this region. - // The excluded memory range has to be on either end of this memory - // region. - inline void exclude_region(address add, size_t sz) { - assert(is_reserved_region() || is_committed_region(), "Sanity check"); - assert(addr() != NULL && size() != 0, "Sanity check"); - assert(add >= addr() && add < addr() + size(), "Sanity check"); - assert(add == addr() || (add + sz) == (addr() + size()), - "exclude in the middle"); - if (add == addr()) { - set_addr(add + sz); - set_size(size() - sz); - } else { - set_size(size() - sz); - } - } -}; - -class VMMemRegionEx : public VMMemRegion { - private: - jint _seq; // sequence number - - public: - VMMemRegionEx(): _pc(0) { } - - void init(const MemPointerRecordEx* mpe) { - VMMemRegion::init(mpe); - _pc = mpe->pc(); - } - - void init(const MemPointerRecord* mpe) { - VMMemRegion::init(mpe); - _pc = 0; - } - - VMMemRegionEx& operator=(const VMMemRegionEx& other) { - VMMemRegion::operator=(other); - _pc = other.pc(); - return *this; - } - - inline address pc() const { return _pc; } - private: - address _pc; -}; - -/* - * Sequenced memory record - */ -class SeqMemPointerRecord : public MemPointerRecord { - private: - jint _seq; // sequence number - - public: - SeqMemPointerRecord(): _seq(0){ } - - SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq) - : MemPointerRecord(addr, flags, size), _seq(seq) { - } - - SeqMemPointerRecord(const SeqMemPointerRecord& copy_from) - : MemPointerRecord(copy_from) { - _seq = copy_from.seq(); - } - - SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) { - MemPointerRecord::operator=(ptr); - _seq = ptr.seq(); - return *this; - } - - inline jint seq() const { - return _seq; - } -}; - - - -class SeqMemPointerRecordEx : public MemPointerRecordEx { - private: - jint _seq; // sequence number - - public: - SeqMemPointerRecordEx(): _seq(0) { } - - SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size, - jint seq, address pc): - MemPointerRecordEx(addr, flags, size, pc), _seq(seq) { - } - - SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from) - : MemPointerRecordEx(copy_from) { - _seq = copy_from.seq(); - } - - SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) { - MemPointerRecordEx::operator=(ptr); - 
_seq = ptr.seq(); - return *this; - } - - inline jint seq() const { - return _seq; - } -}; - -#endif // SHARE_VM_SERVICES_MEM_PTR_HPP --- ./hotspot/src/share/vm/services/memPtrArray.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ -#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP -#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP - -#include "memory/allocation.hpp" -#include "services/memPtr.hpp" - -class MemPtr; -class MemRecorder; -class ArenaInfo; -class MemSnapshot; - -extern "C" { - typedef int (*FN_SORT)(const void *, const void *); -} - - -// Memory pointer array interface. This array is used by NMT to hold -// various memory block information. -// The memory pointer arrays are usually walked with their iterators. 
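// A minimal sketch (not part of the changeset) of the walking pattern the
// comment above refers to, as the removed reporter and recorder code used it;
// 'array' stands in for any MemPointerArray* and the loop body is a placeholder:
//
//   MemPointerArrayIteratorImpl itr(array);
//   for (MemPointer* cur = itr.current(); cur != NULL; cur = itr.next()) {
//     // e.g. inspect ((MemPointerRecord*)cur)->addr() and ->size()
//   }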
- -class MemPointerArray : public CHeapObj { - public: - virtual ~MemPointerArray() { } - - // return true if it can not allocate storage for the data - virtual bool out_of_memory() const = 0; - virtual bool is_empty() const = 0; - virtual bool is_full() = 0; - virtual int length() const = 0; - virtual void clear() = 0; - virtual bool append(MemPointer* ptr) = 0; - virtual bool insert_at(MemPointer* ptr, int pos) = 0; - virtual bool remove_at(int pos) = 0; - virtual MemPointer* at(int index) const = 0; - virtual void sort(FN_SORT fn) = 0; - virtual size_t instance_size() const = 0; - virtual bool shrink() = 0; - - NOT_PRODUCT(virtual int capacity() const = 0;) -}; - -// Iterator interface -class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC { - public: - // return the pointer at current position - virtual MemPointer* current() const = 0; - // return the next pointer and advance current position - virtual MemPointer* next() = 0; - // return next pointer without advancing current position - virtual MemPointer* peek_next() const = 0; - // return previous pointer without changing current position - virtual MemPointer* peek_prev() const = 0; - // remove the pointer at current position - virtual void remove() = 0; - // insert the pointer at current position - virtual bool insert(MemPointer* ptr) = 0; - // insert specified element after current position and - // move current position to newly inserted position - virtual bool insert_after(MemPointer* ptr) = 0; -}; - -// implementation class -class MemPointerArrayIteratorImpl : public MemPointerArrayIterator { - protected: - MemPointerArray* _array; - int _pos; - - public: - MemPointerArrayIteratorImpl(MemPointerArray* arr) { - assert(arr != NULL, "Parameter check"); - _array = arr; - _pos = 0; - } - - virtual MemPointer* current() const { - if (_pos < _array->length()) { - return _array->at(_pos); - } - return NULL; - } - - virtual MemPointer* next() { - if (_pos + 1 < _array->length()) { - return _array->at(++_pos); - } - _pos = _array->length(); - return NULL; - } - - virtual MemPointer* peek_next() const { - if (_pos + 1 < _array->length()) { - return _array->at(_pos + 1); - } - return NULL; - } - - virtual MemPointer* peek_prev() const { - if (_pos > 0) { - return _array->at(_pos - 1); - } - return NULL; - } - - virtual void remove() { - if (_pos < _array->length()) { - _array->remove_at(_pos); - } - } - - virtual bool insert(MemPointer* ptr) { - return _array->insert_at(ptr, _pos); - } - - virtual bool insert_after(MemPointer* ptr) { - if (_array->insert_at(ptr, _pos + 1)) { - _pos ++; - return true; - } - return false; - } -}; - - - -// Memory pointer array implementation. 
-// This implementation implements expandable array -#define DEFAULT_PTR_ARRAY_SIZE 1024 - -template class MemPointerArrayImpl : public MemPointerArray { - private: - int _max_size; - int _size; - bool _init_elements; - E* _data; - - public: - MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true): - _max_size(initial_size), _size(0), _init_elements(init_elements) { - _data = (E*)raw_allocate(sizeof(E), initial_size); - if (_init_elements) { - for (int index = 0; index < _max_size; index ++) { - ::new ((void*)&_data[index]) E(); - } - } - } - - virtual ~MemPointerArrayImpl() { - if (_data != NULL) { - raw_free(_data); - } - } - - public: - bool out_of_memory() const { - return (_data == NULL); - } - - size_t instance_size() const { - return sizeof(MemPointerArrayImpl) + _max_size * sizeof(E); - } - - bool is_empty() const { - assert(_data != NULL, "Just check"); - return _size == 0; - } - - bool is_full() { - assert(_data != NULL, "Just check"); - if (_size < _max_size) { - return false; - } else { - return !expand_array(); - } - } - - int length() const { - assert(_data != NULL, "Just check"); - return _size; - } - - NOT_PRODUCT(int capacity() const { return _max_size; }) - - void clear() { - assert(_data != NULL, "Just check"); - _size = 0; - } - - bool append(MemPointer* ptr) { - assert(_data != NULL, "Just check"); - if (is_full()) { - return false; - } - _data[_size ++] = *(E*)ptr; - return true; - } - - bool insert_at(MemPointer* ptr, int pos) { - assert(_data != NULL, "Just check"); - if (is_full()) { - return false; - } - for (int index = _size; index > pos; index --) { - _data[index] = _data[index - 1]; - } - _data[pos] = *(E*)ptr; - _size ++; - return true; - } - - bool remove_at(int pos) { - assert(_data != NULL, "Just check"); - if (_size <= pos && pos >= 0) { - return false; - } - -- _size; - - for (int index = pos; index < _size; index ++) { - _data[index] = _data[index + 1]; - } - return true; - } - - MemPointer* at(int index) const { - assert(_data != NULL, "Just check"); - assert(index >= 0 && index < _size, "illegal index"); - return &_data[index]; - } - - bool shrink() { - float used = ((float)_size) / ((float)_max_size); - if (used < 0.40) { - E* old_ptr = _data; - int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE; - _data = (E*)raw_reallocate(_data, sizeof(E), new_size); - if (_data == NULL) { - _data = old_ptr; - return false; - } else { - _max_size = new_size; - return true; - } - } - return false; - } - - void sort(FN_SORT fn) { - assert(_data != NULL, "Just check"); - qsort((void*)_data, _size, sizeof(E), fn); - } - - private: - bool expand_array() { - assert(_data != NULL, "Not yet allocated"); - E* old_ptr = _data; - if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E), - _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) { - _data = old_ptr; - return false; - } else { - _max_size += DEFAULT_PTR_ARRAY_SIZE; - if (_init_elements) { - for (int index = _size; index < _max_size; index ++) { - ::new ((void*)&_data[index]) E(); - } - } - return true; - } - } - - void* raw_allocate(size_t elementSize, int items) { - return os::malloc(elementSize * items, mtNMT); - } - - void* raw_reallocate(void* ptr, size_t elementSize, int items) { - return os::realloc(ptr, elementSize * items, mtNMT); - } - - void raw_free(void* ptr) { - os::free(ptr, mtNMT); - } -}; - -#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP --- ./hotspot/src/share/vm/services/memRecorder.cpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null 
Thu Jan 01 00:00:00 1970 +0000 @@ -1,171 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" - -#include "runtime/atomic.hpp" -#include "services/memBaseline.hpp" -#include "services/memRecorder.hpp" -#include "services/memPtr.hpp" -#include "services/memTracker.hpp" - -MemPointer* SequencedRecordIterator::next_record() { - MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current(); - if (itr_cur == NULL) { - return itr_cur; - } - - MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next(); - - // don't collapse virtual memory records - while (itr_next != NULL && !itr_cur->is_vm_pointer() && - !itr_next->is_vm_pointer() && - same_kind(itr_cur, itr_next)) { - itr_cur = itr_next; - itr_next = (MemPointerRecord*)_itr.next(); - } - - return itr_cur; -} - - -volatile jint MemRecorder::_instance_count = 0; - -MemRecorder::MemRecorder() { - assert(MemTracker::is_on(), "Native memory tracking is off"); - Atomic::inc(&_instance_count); - set_generation(); - - if (MemTracker::track_callsite()) { - _pointer_records = new (std::nothrow)FixedSizeMemPointerArray(); - } else { - _pointer_records = new (std::nothrow)FixedSizeMemPointerArray(); - } - _next = NULL; - - - if (_pointer_records != NULL) { - // recode itself - address pc = CURRENT_PC; - record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), - sizeof(MemRecorder), SequenceGenerator::next(), pc); - record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), - _pointer_records->instance_size(), SequenceGenerator::next(), pc); - } -} - -MemRecorder::~MemRecorder() { - if (_pointer_records != NULL) { - if (MemTracker::is_on()) { - MemTracker::record_free((address)_pointer_records, mtNMT); - MemTracker::record_free((address)this, mtNMT); - } - delete _pointer_records; - } - // delete all linked recorders - while (_next != NULL) { - MemRecorder* tmp = _next; - _next = _next->next(); - tmp->set_next(NULL); - delete tmp; - } - Atomic::dec(&_instance_count); -} - -// Sorting order: -// 1. memory block address -// 2. mem pointer record tags -// 3. 
sequence number -int MemRecorder::sort_record_fn(const void* e1, const void* e2) { - const MemPointerRecord* p1 = (const MemPointerRecord*)e1; - const MemPointerRecord* p2 = (const MemPointerRecord*)e2; - int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr()); - if (delta == 0) { - int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks), - (p2->flags() & MemPointerRecord::tag_masks)); - if (df == 0) { - assert(p1->seq() != p2->seq(), "dup seq"); - return p1->seq() - p2->seq(); - } else { - return df; - } - } else { - return delta; - } -} - -bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) { - assert(seq > 0, "No sequence number"); -#ifdef ASSERT - if (MemPointerRecord::is_virtual_memory_record(flags)) { - assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record"); - } else { - assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() || - (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() || - IS_ARENA_OBJ(flags), - "bad malloc record"); - } - // a recorder should only hold records within the same generation - unsigned long cur_generation = SequenceGenerator::current_generation(); - assert(cur_generation == _generation, - "this thread did not enter sync point"); -#endif - - if (MemTracker::track_callsite()) { - SeqMemPointerRecordEx ap(p, flags, size, seq, pc); - debug_only(check_dup_seq(ap.seq());) - return _pointer_records->append(&ap); - } else { - SeqMemPointerRecord ap(p, flags, size, seq); - debug_only(check_dup_seq(ap.seq());) - return _pointer_records->append(&ap); - } -} - - // iterator for alloc pointers -SequencedRecordIterator MemRecorder::pointer_itr() { - assert(_pointer_records != NULL, "just check"); - _pointer_records->sort((FN_SORT)sort_record_fn); - return SequencedRecordIterator(_pointer_records); -} - - -void MemRecorder::set_generation() { - _generation = SequenceGenerator::current_generation(); -} - -#ifdef ASSERT - -void MemRecorder::check_dup_seq(jint seq) const { - MemPointerArrayIteratorImpl itr(_pointer_records); - MemPointerRecord* rc = (MemPointerRecord*)itr.current(); - while (rc != NULL) { - assert(rc->seq() != seq, "dup seq"); - rc = (MemPointerRecord*)itr.next(); - } -} - -#endif --- ./hotspot/src/share/vm/services/memRecorder.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,271 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP -#define SHARE_VM_SERVICES_MEM_RECORDER_HPP - -#include "memory/allocation.hpp" -#include "runtime/os.hpp" -#include "services/memPtrArray.hpp" - -class MemSnapshot; -class MemTracker; -class MemTrackWorker; - -// Fixed size memory pointer array implementation -template class FixedSizeMemPointerArray : - public MemPointerArray { - // This implementation is for memory recorder only - friend class MemRecorder; - - private: - E _data[SIZE]; - int _size; - - protected: - FixedSizeMemPointerArray(bool init_elements = false): - _size(0){ - if (init_elements) { - for (int index = 0; index < SIZE; index ++) { - ::new ((void*)&_data[index]) E(); - } - } - } - - void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { - // the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder' - // to avoid recursion - return os::malloc(size, (mtNMT | otNMTRecorder)); - } - - void* operator new(size_t size) throw() { - assert(false, "use nothrow version"); - return NULL; - } - - void operator delete(void* p) { - os::free(p, (mtNMT | otNMTRecorder)); - } - - // instance size - inline size_t instance_size() const { - return sizeof(FixedSizeMemPointerArray); - } - - NOT_PRODUCT(int capacity() const { return SIZE; }) - - public: - // implementation of public interface - bool out_of_memory() const { return false; } - bool is_empty() const { return _size == 0; } - bool is_full() { return length() >= SIZE; } - int length() const { return _size; } - - void clear() { - _size = 0; - } - - bool append(MemPointer* ptr) { - if (is_full()) return false; - _data[_size ++] = *(E*)ptr; - return true; - } - - virtual bool insert_at(MemPointer* p, int pos) { - assert(false, "append only"); - return false; - } - - virtual bool remove_at(int pos) { - assert(false, "not supported"); - return false; - } - - MemPointer* at(int index) const { - assert(index >= 0 && index < length(), - "parameter check"); - return ((E*)&_data[index]); - } - - void sort(FN_SORT fn) { - qsort((void*)_data, _size, sizeof(E), fn); - } - - bool shrink() { - return false; - } -}; - - -// This iterator requires pre-sorted MemPointerArray, which is sorted by: -// 1. address -// 2. allocation type -// 3. sequence number -// During the array walking, iterator collapses pointers with the same -// address and allocation type, and only returns the one with highest -// sequence number. -// -// This is read-only iterator, update methods are asserted. 
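// A worked example of the collapsing rule described above (addresses and
// sequence numbers are illustrative only): given a pre-sorted array holding
// (0x100, malloc_tag, seq 2), (0x100, malloc_tag, seq 5), (0x200, free_tag, seq 3),
// the iterator yields (0x100, malloc_tag, seq 5) and then (0x200, free_tag, seq 3);
// records with the same address and tag collapse to the one with the highest
// sequence number, i.e. the most recent operation on that block.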
-class SequencedRecordIterator : public MemPointerArrayIterator { - private: - MemPointerArrayIteratorImpl _itr; - MemPointer* _cur; - - public: - SequencedRecordIterator(const MemPointerArray* arr): - _itr(const_cast(arr)) { - _cur = next_record(); - } - - SequencedRecordIterator(const SequencedRecordIterator& itr): - _itr(itr._itr) { - _cur = next_record(); - } - - // return the pointer at current position - virtual MemPointer* current() const { - return _cur; - }; - - // return the next pointer and advance current position - virtual MemPointer* next() { - _cur = next_record(); - return _cur; - } - - // return the next pointer without advancing current position - virtual MemPointer* peek_next() const { - assert(false, "not implemented"); - return NULL; - - } - // return the previous pointer without changing current position - virtual MemPointer* peek_prev() const { - assert(false, "not implemented"); - return NULL; - } - - // remove the pointer at current position - virtual void remove() { - assert(false, "read-only iterator"); - }; - // insert the pointer at current position - virtual bool insert(MemPointer* ptr) { - assert(false, "read-only iterator"); - return false; - } - - virtual bool insert_after(MemPointer* ptr) { - assert(false, "read-only iterator"); - return false; - } - private: - // collapse the 'same kind' of records, and return this 'kind' of - // record with highest sequence number - MemPointer* next_record(); - - // Test if the two records are the same kind: the same memory block and allocation - // type. - inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const { - assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only"); - return (p1->addr() == p2->addr() && - (p1->flags() &MemPointerRecord::tag_masks) == - (p2->flags() & MemPointerRecord::tag_masks)); - } -}; - - - -#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512 - -class MemRecorder : public CHeapObj { - friend class MemSnapshot; - friend class MemTracker; - friend class MemTrackWorker; - friend class GenerationData; - - protected: - // the array that holds memory records - MemPointerArray* _pointer_records; - - private: - // used for linked list - MemRecorder* _next; - // active recorder can only record a certain generation data - unsigned long _generation; - - protected: - _NOINLINE_ MemRecorder(); - ~MemRecorder(); - - // record a memory operation - bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0); - - // linked list support - inline void set_next(MemRecorder* rec) { - _next = rec; - } - - inline MemRecorder* next() const { - return _next; - } - - // if the recorder is full - inline bool is_full() const { - assert(_pointer_records != NULL, "just check"); - return _pointer_records->is_full(); - } - - // if running out of memory when initializing recorder's internal - // data - inline bool out_of_memory() const { - return (_pointer_records == NULL || - _pointer_records->out_of_memory()); - } - - inline void clear() { - assert(_pointer_records != NULL, "Just check"); - _pointer_records->clear(); - } - - SequencedRecordIterator pointer_itr(); - - // return the generation of this recorder which it belongs to - unsigned long get_generation() const { return _generation; } - protected: - // number of MemRecorder instance - static volatile jint _instance_count; - - private: - // sorting function, sort records into following order - // 1. memory address - // 2. allocation type - // 3. 
sequence number - static int sort_record_fn(const void* e1, const void* e2); - - debug_only(void check_dup_seq(jint seq) const;) - void set_generation(); -}; - -#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP --- ./hotspot/src/share/vm/services/memReporter.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memReporter.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -22,618 +22,595 @@ * */ #include "precompiled.hpp" -#include "classfile/systemDictionary.hpp" -#include "runtime/os.hpp" + +#include "memory/allocation.hpp" +#include "services/mallocTracker.hpp" #include "services/memReporter.hpp" -#include "services/memPtrArray.hpp" -#include "services/memTracker.hpp" +#include "services/virtualMemoryTracker.hpp" +#include "utilities/globalDefinitions.hpp" -PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC - -const char* BaselineOutputer::memory_unit(size_t scale) { - switch(scale) { - case K: return "KB"; - case M: return "MB"; - case G: return "GB"; - } - ShouldNotReachHere(); - return NULL; +size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const { + return malloc->malloc_size() + malloc->arena_size() + vm->reserved(); } - -void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary_only) { - assert(MemTracker::is_on(), "Native memory tracking is off"); - _outputer.start(scale()); - _outputer.total_usage( - amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_reserved_amount()), - amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_committed_amount())); - - _outputer.num_of_classes(baseline.number_of_classes()); - _outputer.num_of_threads(baseline.number_of_threads()); - - report_summaries(baseline); - if (!summary_only && MemTracker::track_callsite()) { - report_virtual_memory_map(baseline); - report_callsites(baseline); - } - _outputer.done(); +size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const { + return malloc->malloc_size() + malloc->arena_size() + vm->committed(); } -void BaselineReporter::report_summaries(const MemBaseline& baseline) { - _outputer.start_category_summary(); - MEMFLAGS type; +void MemReporterBase::print_total(size_t reserved, size_t committed) const { + const char* scale = current_scale(); + output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s", + amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale); +} - for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) { - type = MemBaseline::MemType2NameMap[index]._flag; - _outputer.category_summary(type, - amount_in_current_scale(baseline.reserved_amount(type)), - amount_in_current_scale(baseline.committed_amount(type)), - amount_in_current_scale(baseline.malloc_amount(type)), - baseline.malloc_count(type), - amount_in_current_scale(baseline.arena_amount(type)), - baseline.arena_count(type)); +void MemReporterBase::print_malloc(size_t amount, size_t count) const { + const char* scale = current_scale(); + outputStream* out = output(); + out->print("(malloc=" SIZE_FORMAT "%s", + amount_in_current_scale(amount), scale); + + if (count > 0) { + out->print(" #" SIZE_FORMAT "", count); } - _outputer.done_category_summary(); + out->print(")"); } -void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) { - _outputer.start_virtual_memory_map(); - MemBaseline* pBL = const_cast(&baseline); - MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map); - VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current(); 
- while (rgn != NULL) { - if (rgn->is_reserved_region()) { - _outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()), - rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc()); - } else { - _outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(), - amount_in_current_scale(rgn->size()), rgn->pc()); - } - rgn = (VMMemRegionEx*)itr.next(); +void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const { + const char* scale = current_scale(); + output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)", + amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale); +} + +void MemReporterBase::print_malloc_line(size_t amount, size_t count) const { + output()->print("%28s", " "); + print_malloc(amount, count); + output()->print_cr(" "); +} + +void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const { + output()->print("%28s", " "); + print_virtual_memory(reserved, committed); + output()->print_cr(" "); +} + +void MemReporterBase::print_arena_line(size_t amount, size_t count) const { + const char* scale = current_scale(); + output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ", + amount_in_current_scale(amount), scale, count); +} + +void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const { + const char* scale = current_scale(); + output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s", + p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale); +} + + +void MemSummaryReporter::report() { + const char* scale = current_scale(); + outputStream* out = output(); + size_t total_reserved_amount = _malloc_snapshot->total() + + _vm_snapshot->total_reserved(); + size_t total_committed_amount = _malloc_snapshot->total() + + _vm_snapshot->total_committed(); + + // Overall total + out->print_cr("\nNative Memory Tracking:\n"); + out->print("Total: "); + print_total(total_reserved_amount, total_committed_amount); + out->print("\n"); + + // Summary by memory type + for (int index = 0; index < mt_number_of_types; index ++) { + MEMFLAGS flag = NMTUtil::index_to_flag(index); + // thread stack is reported as part of thread category + if (flag == mtThreadStack) continue; + MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag); + VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag); + + report_summary_of_type(flag, malloc_memory, virtual_memory); + } +} + +void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag, + MallocMemory* malloc_memory, VirtualMemory* virtual_memory) { + + size_t reserved_amount = reserved_total (malloc_memory, virtual_memory); + size_t committed_amount = committed_total(malloc_memory, virtual_memory); + + // Count thread's native stack in "Thread" category + if (flag == mtThread) { + const VirtualMemory* thread_stack_usage = + (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack); + reserved_amount += thread_stack_usage->reserved(); + committed_amount += thread_stack_usage->committed(); + } else if (flag == mtNMT) { + // Count malloc headers in "NMT" category + reserved_amount += _malloc_snapshot->malloc_overhead()->size(); + committed_amount += _malloc_snapshot->malloc_overhead()->size(); } - _outputer.done_virtual_memory_map(); + if (amount_in_current_scale(reserved_amount) > 0) { + outputStream* out = output(); + const char* scale = current_scale(); + out->print("-%26s (", NMTUtil::flag_to_name(flag)); + 
print_total(reserved_amount, committed_amount);
+    out->print_cr(")");
+
+    if (flag == mtClass) {
+      // report class count
+      out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count);
+    } else if (flag == mtThread) {
+      // report thread count
+      out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
+      const VirtualMemory* thread_stack_usage =
+        _vm_snapshot->by_type(mtThreadStack);
+      out->print("%27s (stack: ", " ");
+      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
+      out->print_cr(")");
+    }
+
+    // report malloc'd memory
+    if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
+      // We don't know how many arena chunks are in use, so don't report the count
+      size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
+      print_malloc_line(malloc_memory->malloc_size(), count);
+    }
+
+    if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
+      print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
+    }
+
+    if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
+      print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
+    }
+
+    if (flag == mtNMT &&
+      amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
+      out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
+        amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
+    }
+
+    out->print_cr(" ");
+  }
 }

-void BaselineReporter::report_callsites(const MemBaseline& baseline) {
-  _outputer.start_callsite();
-  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
+void MemDetailReporter::report_detail() {
+  // Start detail report
+  outputStream* out = output();
+  out->print_cr("Details:\n");

-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_size);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_size);
+  report_malloc_sites();
+  report_virtual_memory_allocation_sites();
 }

-  // walk malloc callsites
-  MemPointerArrayIteratorImpl malloc_itr(pBL->_malloc_cs);
-  MallocCallsitePointer* malloc_callsite =
-    (MallocCallsitePointer*)malloc_itr.current();
-  while (malloc_callsite != NULL) {
-    _outputer.malloc_callsite(malloc_callsite->addr(),
-      amount_in_current_scale(malloc_callsite->amount()), malloc_callsite->count());
-    malloc_callsite = (MallocCallsitePointer*)malloc_itr.next();
+void MemDetailReporter::report_malloc_sites() {
+  MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
+  if (malloc_itr.is_empty()) return;
+
+  outputStream* out = output();
+
+  const MallocSite* malloc_site;
+  while ((malloc_site = malloc_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(malloc_site->size()) == 0)
+      continue;
+
+    const NativeCallStack* stack = malloc_site->call_stack();
+    stack->print_on(out);
+    out->print("%29s", " ");
+    print_malloc(malloc_site->size(), malloc_site->count());
+    out->print_cr("\n");
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_allocation_sites() {
+  VirtualMemorySiteIterator virtual_memory_itr =
+    _baseline.virtual_memory_sites(MemBaseline::by_size);
+
+  if (virtual_memory_itr.is_empty()) return;
+
+  outputStream* out = output();
+  const VirtualMemoryAllocationSite* virtual_memory_site;
+
+  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
+      continue;
+
+    const NativeCallStack* stack = virtual_memory_site->call_stack();
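// [Editor's illustration -- not part of this changeset; the frames and sizes
// below are invented] Each detail entry prints the recorded reserving call
// stack followed by one indented usage line, roughly:
//
//   [0x00007f3c1c8e2d40] os::reserve_memory(unsigned long, char*, unsigned long)+0x42
//   [0x00007f3c1c8e30f0] ReservedSpace::initialize(...)+0x1b5
//                              (mmap: reserved=131072KB, committed=4096KB)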
+    stack->print_on(out);
+    out->print("%28s (", " ");
+    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
+    out->print_cr(")\n");
+  }
+}
+
+
+void MemDetailReporter::report_virtual_memory_map() {
+  // Virtual memory map always in base address order
+  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+
+  output()->print_cr("Virtual memory map:");
+  while ((rgn = itr.next()) != NULL) {
+    report_virtual_memory_region(rgn);
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
+  assert(reserved_rgn != NULL, "NULL pointer");
+
+  // Don't report if size is too small
+  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+  const NativeCallStack* stack = reserved_rgn->call_stack();
+  bool all_committed = reserved_rgn->all_committed();
+  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
+  out->print_cr(" ");
+  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
+  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
+  if (stack->is_empty()) {
+    out->print_cr(" ");
+  } else {
+    out->print_cr(" from");
+    stack->print_on(out, 4);
  }

-  // walk virtual memory callsite
-  MemPointerArrayIteratorImpl vm_itr(pBL->_vm_cs);
-  VMCallsitePointer* vm_callsite = (VMCallsitePointer*)vm_itr.current();
-  while (vm_callsite != NULL) {
-    _outputer.virtual_memory_callsite(vm_callsite->addr(),
-      amount_in_current_scale(vm_callsite->reserved_amount()),
-      amount_in_current_scale(vm_callsite->committed_amount()));
-    vm_callsite = (VMCallsitePointer*)vm_itr.next();
-  }
-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_pc);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_pc);
-  _outputer.done_callsite();
-}
+  if (all_committed) return;

-void BaselineReporter::diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-  bool summary_only) {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  _outputer.start(scale());
-  size_t total_reserved = cur.total_malloc_amount() + cur.total_reserved_amount();
-  size_t total_committed = cur.total_malloc_amount() + cur.total_committed_amount();
-
-  _outputer.diff_total_usage(
-    amount_in_current_scale(total_reserved), amount_in_current_scale(total_committed),
-    diff_in_current_scale(total_reserved, (prev.total_malloc_amount() + prev.total_reserved_amount())),
-    diff_in_current_scale(total_committed, (prev.total_committed_amount() + prev.total_malloc_amount())));
-
-  _outputer.diff_num_of_classes(cur.number_of_classes(),
-    diff(cur.number_of_classes(), prev.number_of_classes()));
-  _outputer.diff_num_of_threads(cur.number_of_threads(),
-    diff(cur.number_of_threads(), prev.number_of_threads()));
-
-  diff_summaries(cur, prev);
-  if (!summary_only && MemTracker::track_callsite()) {
-    diff_callsites(cur, prev);
-  }
-  _outputer.done();
-}
-
-void BaselineReporter::diff_summaries(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_category_summary();
-  MEMFLAGS type;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    type = MemBaseline::MemType2NameMap[index]._flag;
-    _outputer.diff_category_summary(type,
-      amount_in_current_scale(cur.reserved_amount(type)),
-      amount_in_current_scale(cur.committed_amount(type)),
-      amount_in_current_scale(cur.malloc_amount(type)),
-      cur.malloc_count(type),
-      amount_in_current_scale(cur.arena_amount(type)),
-      cur.arena_count(type),
-      diff_in_current_scale(cur.reserved_amount(type), prev.reserved_amount(type)),
-      diff_in_current_scale(cur.committed_amount(type), prev.committed_amount(type)),
-      diff_in_current_scale(cur.malloc_amount(type), prev.malloc_amount(type)),
-      diff(cur.malloc_count(type), prev.malloc_count(type)),
-      diff_in_current_scale(cur.arena_amount(type), prev.arena_amount(type)),
-      diff(cur.arena_count(type), prev.arena_count(type)));
-  }
-
-  _outputer.done_category_summary();
-}
-
-void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_callsite();
-  MemBaseline* pBL_cur = const_cast<MemBaseline*>(&cur);
-  MemBaseline* pBL_prev = const_cast<MemBaseline*>(&prev);
-
-  // walk malloc callsites
-  MemPointerArrayIteratorImpl cur_malloc_itr(pBL_cur->_malloc_cs);
-  MemPointerArrayIteratorImpl prev_malloc_itr(pBL_prev->_malloc_cs);
-
-  MallocCallsitePointer* cur_malloc_callsite =
-    (MallocCallsitePointer*)cur_malloc_itr.current();
-  MallocCallsitePointer* prev_malloc_callsite =
-    (MallocCallsitePointer*)prev_malloc_itr.current();
-
-  while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
-    if (prev_malloc_callsite == NULL) {
-      assert(cur_malloc_callsite != NULL, "sanity check");
-      // this is a new callsite
-      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-        amount_in_current_scale(cur_malloc_callsite->amount()),
-        cur_malloc_callsite->count(),
-        diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-        diff(cur_malloc_callsite->count(), 0));
-      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-    } else if (cur_malloc_callsite == NULL) {
-      assert(prev_malloc_callsite != NULL, "Sanity check");
-      // this callsite is already gone
-      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-        0, 0,
-        diff_in_current_scale(0, prev_malloc_callsite->amount()),
-        diff(0, prev_malloc_callsite->count()));
-      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+      CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
+      const CommittedMemoryRegion* committed_rgn;
+      while ((committed_rgn = itr.next()) != NULL) {
+        // Don't report if size is too small
+        if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
+        stack = committed_rgn->call_stack();
+        out->print("\n\t");
+        print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
+        if (stack->is_empty()) {
+          out->print_cr(" ");
    } else {
-      assert(cur_malloc_callsite != NULL, "Sanity check");
-      assert(prev_malloc_callsite != NULL, "Sanity check");
-      if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
-        // this is a new callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-          cur_malloc_callsite->count(),
-          diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-          diff(cur_malloc_callsite->count(), 0));
-        cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-      } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
-        // this callsite is already gone
-        _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-          0, 0,
-          diff_in_current_scale(0, prev_malloc_callsite->amount()),
-          diff(0, prev_malloc_callsite->count()));
-        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-      } else {
-        // the same callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-
cur_malloc_callsite->count(), - diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()), - diff(cur_malloc_callsite->count(), prev_malloc_callsite->count())); - cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); - prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); - } - } - } - - // walk virtual memory callsite - MemPointerArrayIteratorImpl cur_vm_itr(pBL_cur->_vm_cs); - MemPointerArrayIteratorImpl prev_vm_itr(pBL_prev->_vm_cs); - VMCallsitePointer* cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.current(); - VMCallsitePointer* prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current(); - while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) { - if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) { - // this is a new callsite - _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(), - amount_in_current_scale(cur_vm_callsite->reserved_amount()), - amount_in_current_scale(cur_vm_callsite->committed_amount()), - diff_in_current_scale(cur_vm_callsite->reserved_amount(), 0), - diff_in_current_scale(cur_vm_callsite->committed_amount(), 0)); - cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next(); - } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) { - // this callsite is already gone - _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(), - amount_in_current_scale(0), - amount_in_current_scale(0), - diff_in_current_scale(0, prev_vm_callsite->reserved_amount()), - diff_in_current_scale(0, prev_vm_callsite->committed_amount())); - prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next(); - } else { // the same callsite - _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(), - amount_in_current_scale(cur_vm_callsite->reserved_amount()), - amount_in_current_scale(cur_vm_callsite->committed_amount()), - diff_in_current_scale(cur_vm_callsite->reserved_amount(), prev_vm_callsite->reserved_amount()), - diff_in_current_scale(cur_vm_callsite->committed_amount(), prev_vm_callsite->committed_amount())); - cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next(); - prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next(); - } - } - - _outputer.done_callsite(); -} - -size_t BaselineReporter::amount_in_current_scale(size_t amt) const { - return (size_t)(((float)amt/(float)_scale) + 0.5); -} - -int BaselineReporter::diff_in_current_scale(size_t value1, size_t value2) const { - return (int)(((float)value1 - (float)value2)/((float)_scale) + 0.5); -} - -int BaselineReporter::diff(size_t value1, size_t value2) const { - return ((int)value1 - (int)value2); -} - -void BaselineTTYOutputer::start(size_t scale, bool report_diff) { - _scale = scale; - _output->print_cr(" "); - _output->print_cr("Native Memory Tracking:"); - _output->print_cr(" "); -} - -void BaselineTTYOutputer::done() { - -} - -void BaselineTTYOutputer::total_usage(size_t total_reserved, size_t total_committed) { - const char* unit = memory_unit(_scale); - _output->print_cr("Total: reserved=%d%s, committed=%d%s", - total_reserved, unit, total_committed, unit); -} - -void BaselineTTYOutputer::start_category_summary() { - _output->print_cr(" "); -} - -/** - * report a summary of memory type - */ -void BaselineTTYOutputer::category_summary(MEMFLAGS type, - size_t reserved_amt, size_t committed_amt, size_t malloc_amt, - size_t malloc_count, size_t arena_amt, size_t arena_count) { - - // we report mtThreadStack under mtThread category - if (type == mtThreadStack) { - 
assert(malloc_amt == 0 && malloc_count == 0 && arena_amt == 0, - "Just check"); - _thread_stack_reserved = reserved_amt; - _thread_stack_committed = committed_amt; - } else { - const char* unit = memory_unit(_scale); - size_t total_reserved = (reserved_amt + malloc_amt + arena_amt); - size_t total_committed = (committed_amt + malloc_amt + arena_amt); - if (type == mtThread) { - total_reserved += _thread_stack_reserved; - total_committed += _thread_stack_committed; - } - - if (total_reserved > 0) { - _output->print_cr("-%26s (reserved=%d%s, committed=%d%s)", - MemBaseline::type2name(type), total_reserved, unit, - total_committed, unit); - - if (type == mtClass) { - _output->print_cr("%27s (classes #%d)", " ", _num_of_classes); - } else if (type == mtThread) { - _output->print_cr("%27s (thread #%d)", " ", _num_of_threads); - _output->print_cr("%27s (stack: reserved=%d%s, committed=%d%s)", " ", - _thread_stack_reserved, unit, _thread_stack_committed, unit); - } - - if (malloc_amt > 0) { - if (type != mtChunk) { - _output->print_cr("%27s (malloc=%d%s, #%d)", " ", malloc_amt, unit, - malloc_count); - } else { - _output->print_cr("%27s (malloc=%d%s)", " ", malloc_amt, unit); - } - } - - if (reserved_amt > 0) { - _output->print_cr("%27s (mmap: reserved=%d%s, committed=%d%s)", - " ", reserved_amt, unit, committed_amt, unit); - } - - if (arena_amt > 0) { - _output->print_cr("%27s (arena=%d%s, #%d)", " ", arena_amt, unit, arena_count); - } - - _output->print_cr(" "); + out->print_cr(" from"); + stack->print_on(out, 12); } } } -void BaselineTTYOutputer::done_category_summary() { - _output->print_cr(" "); -} +void MemSummaryDiffReporter::report_diff() { + const char* scale = current_scale(); + outputStream* out = output(); + out->print_cr("\nNative Memory Tracking:\n"); + // Overall diff + out->print("Total: "); + print_virtual_memory_diff(_current_baseline.total_reserved_memory(), + _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(), + _early_baseline.total_committed_memory()); -void BaselineTTYOutputer::start_virtual_memory_map() { - _output->print_cr("Virtual memory map:"); -} + out->print_cr("\n"); -void BaselineTTYOutputer::reserved_memory_region(MEMFLAGS type, address base, address end, - size_t size, address pc) { - const char* unit = memory_unit(_scale); - char buf[128]; - int offset; - _output->print_cr(" "); - _output->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] reserved %d%s for %s", base, end, size, unit, - MemBaseline::type2name(type)); - if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr("\t\tfrom [%s+0x%x]", buf, offset); + // Summary diff by memory type + for (int index = 0; index < mt_number_of_types; index ++) { + MEMFLAGS flag = NMTUtil::index_to_flag(index); + // thread stack is reported as part of thread category + if (flag == mtThreadStack) continue; + diff_summary_of_type(flag, _early_baseline.malloc_memory(flag), + _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag), + _current_baseline.virtual_memory(flag)); } } -void BaselineTTYOutputer::committed_memory_region(address base, address end, size_t size, address pc) { - const char* unit = memory_unit(_scale); - char buf[128]; - int offset; - _output->print("\t[" PTR_FORMAT " - " PTR_FORMAT "] committed %d%s", base, end, size, unit); - if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr(" from [%s+0x%x]", buf, offset); +void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t 
current_count, + size_t early_amount, size_t early_count) const { + const char* scale = current_scale(); + outputStream* out = output(); + + out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale); + long amount_diff = diff_in_current_scale(current_amount, early_amount); + if (amount_diff != 0) { + out->print(" %+ld%s", amount_diff, scale); + } + if (current_count > 0) { + out->print(" #" SIZE_FORMAT "", current_count); + if (current_count != early_count) { + out->print(" %+d", (int)(current_count - early_count)); + } } } -void BaselineTTYOutputer::done_virtual_memory_map() { - _output->print_cr(" "); -} +void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count, + size_t early_amount, size_t early_count) const { + const char* scale = current_scale(); + outputStream* out = output(); + out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale); + if (diff_in_current_scale(current_amount, early_amount) != 0) { + out->print(" %+ld", diff_in_current_scale(current_amount, early_amount)); + } - - -void BaselineTTYOutputer::start_callsite() { - _output->print_cr("Details:"); - _output->print_cr(" "); -} - -void BaselineTTYOutputer::done_callsite() { - _output->print_cr(" "); -} - -void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt, - size_t malloc_count) { - if (malloc_amt > 0) { - const char* unit = memory_unit(_scale); - char buf[128]; - int offset; - if (pc == 0) { - _output->print("[BOOTSTRAP]%18s", " "); - } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset); - _output->print("%28s", " "); - } else { - _output->print("[" PTR_FORMAT "]%18s", pc, " "); - } - - _output->print_cr("(malloc=%d%s #%d)", malloc_amt, unit, malloc_count); - _output->print_cr(" "); + out->print(" #" SIZE_FORMAT "", current_count); + if (current_count != early_count) { + out->print(" %+d", (int)(current_count - early_count)); } } -void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_amt, - size_t committed_amt) { - if (reserved_amt > 0) { - const char* unit = memory_unit(_scale); - char buf[128]; - int offset; - if (pc == 0) { - _output->print("[BOOTSTRAP]%18s", " "); - } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset); - _output->print("%28s", " "); - } else { - _output->print("[" PTR_FORMAT "]%18s", pc, " "); - } +void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed, + size_t early_reserved, size_t early_committed) const { + const char* scale = current_scale(); + outputStream* out = output(); + out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale); + long reserved_diff = diff_in_current_scale(current_reserved, early_reserved); + if (reserved_diff != 0) { + out->print(" %+ld%s", reserved_diff, scale); + } - _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)", - reserved_amt, unit, committed_amt, unit); - _output->print_cr(" "); + out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale); + long committed_diff = diff_in_current_scale(current_committed, early_committed); + if (committed_diff != 0) { + out->print(" %+ld%s", committed_diff, scale); } } -void BaselineTTYOutputer::diff_total_usage(size_t total_reserved, - size_t total_committed, int reserved_diff, int 
committed_diff) { - const char* unit = memory_unit(_scale); - _output->print_cr("Total: reserved=%d%s %+d%s, committed=%d%s %+d%s", - total_reserved, unit, reserved_diff, unit, total_committed, unit, - committed_diff, unit); -} -void BaselineTTYOutputer::diff_category_summary(MEMFLAGS type, - size_t cur_reserved_amt, size_t cur_committed_amt, - size_t cur_malloc_amt, size_t cur_malloc_count, - size_t cur_arena_amt, size_t cur_arena_count, - int reserved_diff, int committed_diff, int malloc_diff, - int malloc_count_diff, int arena_diff, int arena_count_diff) { +void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc, + const VirtualMemory* early_vm, const MallocMemory* current_malloc, + const VirtualMemory* current_vm) const { - if (type == mtThreadStack) { - assert(cur_malloc_amt == 0 && cur_malloc_count == 0 && - cur_arena_amt == 0, "Just check"); - _thread_stack_reserved = cur_reserved_amt; - _thread_stack_committed = cur_committed_amt; - _thread_stack_reserved_diff = reserved_diff; - _thread_stack_committed_diff = committed_diff; - } else { - const char* unit = memory_unit(_scale); - size_t total_reserved = (cur_reserved_amt + cur_malloc_amt + cur_arena_amt); - // nothing to report in this category - if (total_reserved == 0) { - return; - } - int diff_reserved = (reserved_diff + malloc_diff + arena_diff); + outputStream* out = output(); + const char* scale = current_scale(); - // category summary - _output->print("-%26s (reserved=%d%s", MemBaseline::type2name(type), - total_reserved, unit); + // Total reserved and committed memory in current baseline + size_t current_reserved_amount = reserved_total (current_malloc, current_vm); + size_t current_committed_amount = committed_total(current_malloc, current_vm); - if (diff_reserved != 0) { - _output->print(" %+d%s", diff_reserved, unit); + // Total reserved and committed memory in early baseline + size_t early_reserved_amount = reserved_total(early_malloc, early_vm); + size_t early_committed_amount = committed_total(early_malloc, early_vm); + + // Adjust virtual memory total + if (flag == mtThread) { + const VirtualMemory* early_thread_stack_usage = + _early_baseline.virtual_memory(mtThreadStack); + const VirtualMemory* current_thread_stack_usage = + _current_baseline.virtual_memory(mtThreadStack); + + early_reserved_amount += early_thread_stack_usage->reserved(); + early_committed_amount += early_thread_stack_usage->committed(); + + current_reserved_amount += current_thread_stack_usage->reserved(); + current_committed_amount += current_thread_stack_usage->committed(); + } else if (flag == mtNMT) { + early_reserved_amount += _early_baseline.malloc_tracking_overhead(); + early_committed_amount += _early_baseline.malloc_tracking_overhead(); + + current_reserved_amount += _current_baseline.malloc_tracking_overhead(); + current_committed_amount += _current_baseline.malloc_tracking_overhead(); + } + + if (amount_in_current_scale(current_reserved_amount) > 0 || + diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) { + + // print summary line + out->print("-%26s (", NMTUtil::flag_to_name(flag)); + print_virtual_memory_diff(current_reserved_amount, current_committed_amount, + early_reserved_amount, early_committed_amount); + out->print_cr(")"); + + // detail lines + if (flag == mtClass) { + // report class count + out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count()); + int class_count_diff = (int)(_current_baseline.class_count() - + 
_early_baseline.class_count()); + if (_current_baseline.class_count() != _early_baseline.class_count()) { + out->print(" %+d", (int)(_current_baseline.class_count() - _early_baseline.class_count())); + } + out->print_cr(")"); + } else if (flag == mtThread) { + // report thread count + out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count()); + int thread_count_diff = (int)(_current_baseline.thread_count() - + _early_baseline.thread_count()); + if (thread_count_diff != 0) { + out->print(" %+d", thread_count_diff); + } + out->print_cr(")"); + + // report thread stack + const VirtualMemory* current_thread_stack = + _current_baseline.virtual_memory(mtThreadStack); + const VirtualMemory* early_thread_stack = + _early_baseline.virtual_memory(mtThreadStack); + + out->print("%27s (stack: ", " "); + print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(), + early_thread_stack->reserved(), early_thread_stack->committed()); + out->print_cr(")"); } - size_t total_committed = cur_committed_amt + cur_malloc_amt + cur_arena_amt; - _output->print(", committed=%d%s", total_committed, unit); - - int total_committed_diff = committed_diff + malloc_diff + arena_diff; - if (total_committed_diff != 0) { - _output->print(" %+d%s", total_committed_diff, unit); + // Report malloc'd memory + size_t current_malloc_amount = current_malloc->malloc_size(); + size_t early_malloc_amount = early_malloc->malloc_size(); + if (amount_in_current_scale(current_malloc_amount) > 0 || + diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) { + out->print("%28s(", " "); + print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(), + early_malloc_amount, early_malloc->malloc_count()); + out->print_cr(")"); } - _output->print_cr(")"); - - // special cases - if (type == mtClass) { - _output->print("%27s (classes #%d", " ", _num_of_classes); - if (_num_of_classes_diff != 0) { - _output->print(" %+d", _num_of_classes_diff); - } - _output->print_cr(")"); - } else if (type == mtThread) { - // thread count - _output->print("%27s (thread #%d", " ", _num_of_threads); - if (_num_of_threads_diff != 0) { - _output->print_cr(" %+d)", _num_of_threads_diff); - } else { - _output->print_cr(")"); - } - _output->print("%27s (stack: reserved=%d%s", " ", _thread_stack_reserved, unit); - if (_thread_stack_reserved_diff != 0) { - _output->print(" %+d%s", _thread_stack_reserved_diff, unit); - } - - _output->print(", committed=%d%s", _thread_stack_committed, unit); - if (_thread_stack_committed_diff != 0) { - _output->print(" %+d%s",_thread_stack_committed_diff, unit); - } - - _output->print_cr(")"); + // Report virtual memory + if (amount_in_current_scale(current_vm->reserved()) > 0 || + diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) { + out->print("%27s (mmap: ", " "); + print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(), + early_vm->reserved(), early_vm->committed()); + out->print_cr(")"); } - // malloc'd memory - if (cur_malloc_amt > 0) { - _output->print("%27s (malloc=%d%s", " ", cur_malloc_amt, unit); - if (malloc_diff != 0) { - _output->print(" %+d%s", malloc_diff, unit); - } - if (type != mtChunk) { - _output->print(", #%d", cur_malloc_count); - if (malloc_count_diff) { - _output->print(" %+d", malloc_count_diff); - } - } - _output->print_cr(")"); + // Report arena memory + if (amount_in_current_scale(current_malloc->arena_size()) > 0 || + 
diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) { + out->print("%28s(", " "); + print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(), + early_malloc->arena_size(), early_malloc->arena_count()); + out->print_cr(")"); } - // mmap'd memory - if (cur_reserved_amt > 0) { - _output->print("%27s (mmap: reserved=%d%s", " ", cur_reserved_amt, unit); - if (reserved_diff != 0) { - _output->print(" %+d%s", reserved_diff, unit); + // Report native memory tracking overhead + if (flag == mtNMT) { + size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead()); + size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead()); + + out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ", + amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale); + + long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(), + _early_baseline.malloc_tracking_overhead()); + if (overhead_diff != 0) { + out->print(" %+ld%s", overhead_diff, scale); } - - _output->print(", committed=%d%s", cur_committed_amt, unit); - if (committed_diff != 0) { - _output->print(" %+d%s", committed_diff, unit); - } - _output->print_cr(")"); + out->print_cr(")"); } - - // arena memory - if (cur_arena_amt > 0) { - _output->print("%27s (arena=%d%s", " ", cur_arena_amt, unit); - if (arena_diff != 0) { - _output->print(" %+d%s", arena_diff, unit); - } - _output->print(", #%d", cur_arena_count); - if (arena_count_diff != 0) { - _output->print(" %+d", arena_count_diff); - } - _output->print_cr(")"); - } - - _output->print_cr(" "); + out->print_cr(" "); } } -void BaselineTTYOutputer::diff_malloc_callsite(address pc, - size_t cur_malloc_amt, size_t cur_malloc_count, - int malloc_diff, int malloc_count_diff) { - if (malloc_diff != 0) { - const char* unit = memory_unit(_scale); - char buf[128]; - int offset; - if (pc == 0) { - _output->print_cr("[BOOTSTRAP]%18s", " "); +void MemDetailDiffReporter::report_diff() { + MemSummaryDiffReporter::report_diff(); + diff_malloc_sites(); + diff_virtual_memory_sites(); +} + +void MemDetailDiffReporter::diff_malloc_sites() const { + MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site); + MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site); + + const MallocSite* early_site = early_itr.next(); + const MallocSite* current_site = current_itr.next(); + + while (early_site != NULL || current_site != NULL) { + if (early_site == NULL) { + new_malloc_site(current_site); + current_site = current_itr.next(); + } else if (current_site == NULL) { + old_malloc_site(early_site); + early_site = early_itr.next(); } else { - if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset); - _output->print("%28s", " "); + int compVal = current_site->call_stack()->compare(*early_site->call_stack()); + if (compVal < 0) { + new_malloc_site(current_site); + current_site = current_itr.next(); + } else if (compVal > 0) { + old_malloc_site(early_site); + early_site = early_itr.next(); } else { - _output->print("[" PTR_FORMAT "]%18s", pc, " "); + diff_malloc_site(early_site, current_site); + early_site = early_itr.next(); + current_site = current_itr.next(); } } - - _output->print("(malloc=%d%s", cur_malloc_amt, unit); - if (malloc_diff != 0) { - _output->print(" %+d%s", malloc_diff, unit); - } - _output->print(", #%d", 
cur_malloc_count); - if (malloc_count_diff != 0) { - _output->print(" %+d", malloc_count_diff); - } - _output->print_cr(")"); - _output->print_cr(" "); } } -void BaselineTTYOutputer::diff_virtual_memory_callsite(address pc, - size_t cur_reserved_amt, size_t cur_committed_amt, - int reserved_diff, int committed_diff) { - if (reserved_diff != 0 || committed_diff != 0) { - const char* unit = memory_unit(_scale); - char buf[64]; - int offset; - if (pc == 0) { - _output->print_cr("[BOOSTRAP]%18s", " "); +void MemDetailDiffReporter::diff_virtual_memory_sites() const { + VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site); + VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site); + + const VirtualMemoryAllocationSite* early_site = early_itr.next(); + const VirtualMemoryAllocationSite* current_site = current_itr.next(); + + while (early_site != NULL || current_site != NULL) { + if (early_site == NULL) { + new_virtual_memory_site(current_site); + current_site = current_itr.next(); + } else if (current_site == NULL) { + old_virtual_memory_site(early_site); + early_site = early_itr.next(); } else { - if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { - _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset); - _output->print("%28s", " "); + int compVal = current_site->call_stack()->compare(*early_site->call_stack()); + if (compVal < 0) { + new_virtual_memory_site(current_site); + current_site = current_itr.next(); + } else if (compVal > 0) { + old_virtual_memory_site(early_site); + early_site = early_itr.next(); } else { - _output->print("[" PTR_FORMAT "]%18s", pc, " "); + diff_virtual_memory_site(early_site, current_site); + early_site = early_itr.next(); + current_site = current_itr.next(); } } - - _output->print("(mmap: reserved=%d%s", cur_reserved_amt, unit); - if (reserved_diff != 0) { - _output->print(" %+d%s", reserved_diff, unit); - } - _output->print(", committed=%d%s", cur_committed_amt, unit); - if (committed_diff != 0) { - _output->print(" %+d%s", committed_diff, unit); - } - _output->print_cr(")"); - _output->print_cr(" "); } } + + +void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const { + diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(), + 0, 0); +} + +void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const { + diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(), + malloc_site->count()); +} + +void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early, + const MallocSite* current) const { + diff_malloc_site(current->call_stack(), current->size(), current->count(), + early->size(), early->count()); +} + +void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size, + size_t current_count, size_t early_size, size_t early_count) const { + outputStream* out = output(); + + assert(stack != NULL, "NULL stack"); + + if (diff_in_current_scale(current_size, early_size) == 0) { + return; + } + + stack->print_on(out); + out->print("%28s (", " "); + print_malloc_diff(current_size, current_count, + early_size, early_count); + + out->print_cr(")\n"); +} + + +void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const { + diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0); +} + +void MemDetailDiffReporter::old_virtual_memory_site(const 
VirtualMemoryAllocationSite* site) const { + diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed()); +} + +void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early, + const VirtualMemoryAllocationSite* current) const { + diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(), + early->reserved(), early->committed()); +} + +void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved, + size_t current_committed, size_t early_reserved, size_t early_committed) const { + outputStream* out = output(); + + // no change + if (diff_in_current_scale(current_reserved, early_reserved) == 0 && + diff_in_current_scale(current_committed, early_committed) == 0) { + return; + } + + stack->print_on(out); + out->print("%28s (mmap: ", " "); + print_virtual_memory_diff(current_reserved, current_committed, + early_reserved, early_committed); + + out->print_cr(")\n"); + } + --- ./hotspot/src/share/vm/services/memReporter.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memReporter.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,262 +25,203 @@ #ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP #define SHARE_VM_SERVICES_MEM_REPORTER_HPP -#include "runtime/mutexLocker.hpp" -#include "services/memBaseline.hpp" -#include "services/memTracker.hpp" -#include "utilities/ostream.hpp" -#include "utilities/macros.hpp" - #if INCLUDE_NMT +#include "oops/instanceKlass.hpp" +#include "services/memBaseline.hpp" +#include "services/nmtCommon.hpp" +#include "services/mallocTracker.hpp" +#include "services/virtualMemoryTracker.hpp" + /* - * MemBaselineReporter reports data to this outputer class, - * ReportOutputer is responsible for format, store and redirect - * the data to the final destination. - */ -class BaselineOutputer : public StackObj { + * Base class that provides helpers +*/ +class MemReporterBase : public StackObj { + private: + size_t _scale; // report in this scale + outputStream* _output; // destination + public: - // start to report memory usage in specified scale. - // if report_diff = true, the reporter reports baseline comparison - // information. + MemReporterBase(outputStream* out = NULL, size_t scale = K) + : _scale(scale) { + _output = (out == NULL) ? 
tty : out;
+  }

-  virtual void start(size_t scale, bool report_diff = false) = 0;
-  // Done reporting
-  virtual void done() = 0;
+ protected:
+  inline outputStream* output() const {
+    return _output;
+  }
+  // Current reporting scale
+  inline const char* current_scale() const {
+    return NMTUtil::scale_name(_scale);
+  }
+  // Convert memory amount in bytes to current reporting scale
+  inline size_t amount_in_current_scale(size_t amount) const {
+    return NMTUtil::amount_in_scale(amount, _scale);
+  }

-  /* report baseline summary information */
-  virtual void total_usage(size_t total_reserved,
-    size_t total_committed) = 0;
-  virtual void num_of_classes(size_t classes) = 0;
-  virtual void num_of_threads(size_t threads) = 0;
+  // Convert diff amount in bytes to current reporting scale, rounding
+  // half a unit away from zero (e.g. with _scale == K, a raw diff of
+  // 2600 bytes reports as +3)
+  inline long diff_in_current_scale(size_t s1, size_t s2) const {
+    long amount = (long)(s1 - s2);
+    long scale = (long)_scale;
+    amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
+    return amount / scale;
+  }

-  virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
+  // Helper functions
+  // Calculate total reserved and committed amount
+  size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
+  size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;

-  /* report baseline summary comparison */
-  virtual void diff_total_usage(size_t total_reserved,
-    size_t total_committed,
-    int reserved_diff,
-    int committed_diff) = 0;
-  virtual void diff_num_of_classes(size_t classes, int diff) = 0;
-  virtual void diff_num_of_threads(size_t threads, int diff) = 0;
-  virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
-    int stack_reserved_diff, int stack_committed_diff) = 0;
+  // Print summary total, malloc and virtual memory
+  void print_total(size_t reserved, size_t committed) const;
+  void print_malloc(size_t amount, size_t count) const;
+  void print_virtual_memory(size_t reserved, size_t committed) const;
+  void print_malloc_line(size_t amount, size_t count) const;
+  void print_virtual_memory_line(size_t reserved, size_t committed) const;
+  void print_arena_line(size_t amount, size_t count) const;

-  /*
-   * memory summary by memory types.
-   * for each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc'd amount, malloc count
-   *  - arena amount, arena count
-   */
-
-  // start reporting memory summary by memory type
-  virtual void start_category_summary() = 0;
-
-  virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
-    size_t committed_amt,
-    size_t malloc_amt, size_t malloc_count,
-    size_t arena_amt, size_t arena_count) = 0;
-
-  virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-    size_t cur_committed_amt,
-    size_t cur_malloc_amt, size_t cur_malloc_count,
-    size_t cur_arena_amt, size_t cur_arena_count,
-    int reserved_diff, int committed_diff, int malloc_diff,
-    int malloc_count_diff, int arena_diff,
-    int arena_count_diff) = 0;
-
-  virtual void done_category_summary() = 0;
-
-  virtual void start_virtual_memory_map() = 0;
-  virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
-  virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
-  virtual void done_virtual_memory_map() = 0;
-
-  /*
-   * Report callsite information
-   */
-  virtual void start_callsite() = 0;
-  virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
-  virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
-
-  virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-    int malloc_diff, int malloc_count_diff) = 0;
-  virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-    int reserved_diff, int committed_diff) = 0;
-
-  virtual void done_callsite() = 0;
-
-  // return current scale in "KB", "MB" or "GB"
-  static const char* memory_unit(size_t scale);
+  void print_virtual_memory_region(const char* type, address base, size_t size) const;
 };

 /*
- * This class reports processed data from a baseline or
- * the changes between the two baseline.
+ * This class generates the summary tracking report.
  */
-class BaselineReporter : public StackObj {
+class MemSummaryReporter : public MemReporterBase {
  private:
-  BaselineOutputer&  _outputer;
-  size_t             _scale;
+  MallocMemorySnapshot*   _malloc_snapshot;
+  VirtualMemorySnapshot*  _vm_snapshot;
+  size_t                  _class_count;

  public:
-  // construct a reporter that reports memory usage
-  // in specified scale
-  BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
-    _outputer(outputer) {
-    _scale = scale;
-  }
-  virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
-  virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-    bool summary_only = false);
+  // This constructor is for normal reporting from a recent baseline.
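  // For example (editor's sketch, not part of this change; assumes a
  // MemBaseline 'baseline' that has already been captured):
  //
  //   MemSummaryReporter reporter(baseline, tty, K);
  //   reporter.report();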
+  MemSummaryReporter(MemBaseline& baseline, outputStream* output,
+    size_t scale = K) : MemReporterBase(output, scale),
+    _malloc_snapshot(baseline.malloc_memory_snapshot()),
+    _vm_snapshot(baseline.virtual_memory_snapshot()),
+    _class_count(baseline.class_count()) { }

-  void set_scale(size_t scale);
-  size_t scale() const { return _scale; }
+  // Generate summary report
+  virtual void report();

 private:
-  void report_summaries(const MemBaseline& baseline);
-  void report_virtual_memory_map(const MemBaseline& baseline);
-  void report_callsites(const MemBaseline& baseline);
-
-  void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
-  void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
-
-  // calculate memory size in current memory scale
-  size_t amount_in_current_scale(size_t amt) const;
-  // diff two unsigned values in current memory scale
-  int diff_in_current_scale(size_t value1, size_t value2) const;
-  // diff two unsigned value
-  int diff(size_t value1, size_t value2) const;
+  // Report summary for each memory type
+  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
+    VirtualMemory* virtual_memory);
 };

 /*
- * tty output implementation. Native memory tracking
- * DCmd uses this outputer.
+ * This class generates the detail tracking report.
  */
-class BaselineTTYOutputer : public BaselineOutputer {
+class MemDetailReporter : public MemSummaryReporter {
  private:
-  size_t         _scale;
-
-  size_t         _num_of_classes;
-  size_t         _num_of_threads;
-  size_t         _thread_stack_reserved;
-  size_t         _thread_stack_committed;
-
-  int            _num_of_classes_diff;
-  int            _num_of_threads_diff;
-  int            _thread_stack_reserved_diff;
-  int            _thread_stack_committed_diff;
-
-  outputStream*  _output;
+  MemBaseline&   _baseline;

 public:
-  BaselineTTYOutputer(outputStream* st) {
-    _scale = K;
-    _num_of_classes = 0;
-    _num_of_threads = 0;
-    _thread_stack_reserved = 0;
-    _thread_stack_committed = 0;
-    _num_of_classes_diff = 0;
-    _num_of_threads_diff = 0;
-    _thread_stack_reserved_diff = 0;
-    _thread_stack_committed_diff = 0;
-    _output = st;
+  MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
+    MemSummaryReporter(baseline, output, scale),
+    _baseline(baseline) { }
+
+  // Generate detail report.
+  // The report contains summary and detail sections.
+  virtual void report() {
+    MemSummaryReporter::report();
+    report_virtual_memory_map();
+    report_detail();
  }

-  // begin reporting memory usage in specified scale
-  void start(size_t scale, bool report_diff = false);
-  // done reporting
-  void done();
+ private:
+  // Report detail tracking data.
+  void report_detail();
+  // Report virtual memory map
+  void report_virtual_memory_map();
+  // Report malloc allocation sites
+  void report_malloc_sites();
+  // Report virtual memory reservation sites
+  void report_virtual_memory_allocation_sites();

-  // total memory usage
-  void total_usage(size_t total_reserved,
-    size_t total_committed);
-  // report total loaded classes
-  void num_of_classes(size_t classes) {
-    _num_of_classes = classes;
+  // Report a virtual memory region
+  void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
+};
+
+/*
+ * This class generates the summary comparison report.
+ * It compares the current memory baseline against an early baseline.
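 * An illustrative use (editor's sketch, not part of this change; both
 * baselines are assumed to have been captured beforehand):
 *
 *   MemSummaryDiffReporter reporter(early, current, tty, K);
 *   reporter.report_diff();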
+ */ +class MemSummaryDiffReporter : public MemReporterBase { + protected: + MemBaseline& _early_baseline; + MemBaseline& _current_baseline; + + public: + MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline, + outputStream* output, size_t scale = K) : MemReporterBase(output, scale), + _early_baseline(early_baseline), _current_baseline(current_baseline) { + assert(early_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined"); + assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined"); } - void num_of_threads(size_t threads) { - _num_of_threads = threads; - } + // Generate summary comparison report + virtual void report_diff(); - void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) { - _thread_stack_reserved = stack_reserved_amt; - _thread_stack_committed = stack_committed_amt; - } + private: + // report the comparison of each memory type + void diff_summary_of_type(MEMFLAGS type, + const MallocMemory* early_malloc, const VirtualMemory* early_vm, + const MallocMemory* current_malloc, const VirtualMemory* current_vm) const; - void diff_total_usage(size_t total_reserved, - size_t total_committed, - int reserved_diff, - int committed_diff); - - void diff_num_of_classes(size_t classes, int diff) { - _num_of_classes = classes; - _num_of_classes_diff = diff; - } - - void diff_num_of_threads(size_t threads, int diff) { - _num_of_threads = threads; - _num_of_threads_diff = diff; - } - - void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt, - int stack_reserved_diff, int stack_committed_diff) { - _thread_stack_reserved = stack_reserved_amt; - _thread_stack_committed = stack_committed_amt; - _thread_stack_reserved_diff = stack_reserved_diff; - _thread_stack_committed_diff = stack_committed_diff; - } - - /* - * Report memory summary categoriuzed by memory types. 
-   * For each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc-ed amount, malloc count
-   *  - arena amount, arena count
-   */
-  // start reporting memory summary by memory type
-  void start_category_summary();
-  void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
-    size_t malloc_amt, size_t malloc_count,
-    size_t arena_amt, size_t arena_count);
-
-  void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-    size_t cur_committed_amt,
-    size_t cur_malloc_amt, size_t cur_malloc_count,
-    size_t cur_arena_amt, size_t cur_arena_count,
-    int reserved_diff, int committed_diff, int malloc_diff,
-    int malloc_count_diff, int arena_diff,
-    int arena_count_diff);
-
-  void done_category_summary();
-
-  // virtual memory map
-  void start_virtual_memory_map();
-  void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
-  void committed_memory_region(address base, address end, size_t size, address pc);
-  void done_virtual_memory_map();
-
-
-  /*
-   * Report callsite information
-   */
-  void start_callsite();
-  void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
-  void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
-
-  void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-    int malloc_diff, int malloc_count_diff);
-  void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-    int reserved_diff, int committed_diff);
-
-  void done_callsite();
+ protected:
+  void print_malloc_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
+  void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
+    size_t early_reserved, size_t early_committed) const;
+  void print_arena_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
 };

+/*
+ * This class generates the detail comparison report.
+ * It compares the current memory baseline against an early baseline;
+ * both baselines have to be detail baselines.
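 * Illustrative use (editor's sketch, not part of this change):
 *
 *   MemDetailDiffReporter reporter(early, current, tty, K);
 *   reporter.report_diff();
 *
 * report_diff() first emits the summary diff, then merge-walks the two
 * baselines' by-site sorted malloc and virtual memory iterators to report
 * each call site as new, gone, or changed.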
+ */
+class MemDetailDiffReporter : public MemSummaryDiffReporter {
+ public:
+  MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
+    outputStream* output, size_t scale = K) :
+    MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }
+
+  // Generate detail comparison report
+  virtual void report_diff();
+
+  // Malloc allocation site comparison
+  void diff_malloc_sites() const;
+  // Virtual memory reservation site comparison
+  void diff_virtual_memory_sites() const;
+
+  // New malloc allocation site in recent baseline
+  void new_malloc_site (const MallocSite* site) const;
+  // The malloc allocation site is no longer in the recent baseline
+  void old_malloc_site (const MallocSite* site) const;
+  // Compare a malloc allocation site that is in both baselines
+  void diff_malloc_site(const MallocSite* early, const MallocSite* current) const;
+
+  // New virtual memory allocation site in recent baseline
+  void new_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
+  // The virtual memory allocation site is no longer in the recent baseline
+  void old_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
+  // Compare a virtual memory allocation site that is in both baselines
+  void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
+    const VirtualMemoryAllocationSite* current) const;
+
+  void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
+    size_t current_count, size_t early_size, size_t early_count) const;
+  void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
+    size_t current_committed, size_t early_reserved, size_t early_committed) const;
+};
 #endif // INCLUDE_NMT

-#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
+#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
+
--- ./hotspot/src/share/vm/services/memSnapshot.cpp	Mon Dec 08 12:28:35 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,748 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- * - */ - -#include "precompiled.hpp" -#include "runtime/mutexLocker.hpp" -#include "utilities/decoder.hpp" -#include "services/memBaseline.hpp" -#include "services/memPtr.hpp" -#include "services/memPtrArray.hpp" -#include "services/memSnapshot.hpp" -#include "services/memTracker.hpp" - -PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC - -#ifdef ASSERT - -void decode_pointer_record(MemPointerRecord* rec) { - tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(), - rec->addr() + rec->size(), (int)rec->size()); - tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags()))); - if (rec->is_vm_pointer()) { - if (rec->is_allocation_record()) { - tty->print_cr(" (reserve)"); - } else if (rec->is_commit_record()) { - tty->print_cr(" (commit)"); - } else if (rec->is_uncommit_record()) { - tty->print_cr(" (uncommit)"); - } else if (rec->is_deallocation_record()) { - tty->print_cr(" (release)"); - } else { - tty->print_cr(" (tag)"); - } - } else { - if (rec->is_arena_memory_record()) { - tty->print_cr(" (arena size)"); - } else if (rec->is_allocation_record()) { - tty->print_cr(" (malloc)"); - } else { - tty->print_cr(" (free)"); - } - } - if (MemTracker::track_callsite()) { - char buf[1024]; - address pc = ((MemPointerRecordEx*)rec)->pc(); - if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) { - tty->print_cr("\tfrom %s", buf); - } else { - tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc); - } - } -} - -void decode_vm_region_record(VMMemRegion* rec) { - tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(), - rec->addr() + rec->size()); - tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags()))); - if (rec->is_allocation_record()) { - tty->print_cr(" (reserved)"); - } else if (rec->is_commit_record()) { - tty->print_cr(" (committed)"); - } else { - ShouldNotReachHere(); - } - if (MemTracker::track_callsite()) { - char buf[1024]; - address pc = ((VMMemRegionEx*)rec)->pc(); - if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) { - tty->print_cr("\tfrom %s", buf); - } else { - tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc); - } - - } -} - -#endif - - -bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) { - VMMemRegionEx new_rec; - assert(rec->is_allocation_record() || rec->is_commit_record(), - "Sanity check"); - if (MemTracker::track_callsite()) { - new_rec.init((MemPointerRecordEx*)rec); - } else { - new_rec.init(rec); - } - return insert(&new_rec); -} - -bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) { - VMMemRegionEx new_rec; - assert(rec->is_allocation_record() || rec->is_commit_record(), - "Sanity check"); - if (MemTracker::track_callsite()) { - new_rec.init((MemPointerRecordEx*)rec); - } else { - new_rec.init(rec); - } - return insert_after(&new_rec); -} - -// we don't consolidate reserved regions, since they may be categorized -// in different types. -bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) { - assert(rec->is_allocation_record(), "Sanity check"); - VMMemRegion* reserved_region = (VMMemRegion*)current(); - - // we don't have anything yet - if (reserved_region == NULL) { - return insert_record(rec); - } - - assert(reserved_region->is_reserved_region(), "Sanity check"); - // duplicated records - if (reserved_region->is_same_region(rec)) { - return true; - } - // Overlapping stack regions indicate that a JNI thread failed to - // detach from the VM before exiting. 
This leaks the JavaThread object. - if (CheckJNICalls) { - guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack || - !reserved_region->overlaps_region(rec), - "Attached JNI thread exited without being detached"); - } - // otherwise, we should not have overlapping reserved regions - assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack || - reserved_region->base() > rec->addr(), "Just check: locate()"); - assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack || - !reserved_region->overlaps_region(rec), "overlapping reserved regions"); - - return insert_record(rec); -} - -// we do consolidate committed regions -bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) { - assert(rec->is_commit_record(), "Sanity check"); - VMMemRegion* reserved_rgn = (VMMemRegion*)current(); - assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec), - "Sanity check"); - - // thread's native stack is always marked as "committed", ignore - // the "commit" operation for creating stack guard pages - if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack && - FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { - return true; - } - - // if the reserved region has any committed regions - VMMemRegion* committed_rgn = (VMMemRegion*)next(); - while (committed_rgn != NULL && committed_rgn->is_committed_region()) { - // duplicated commit records - if(committed_rgn->contains_region(rec)) { - return true; - } else if (committed_rgn->overlaps_region(rec)) { - // overlaps front part - if (rec->addr() < committed_rgn->addr()) { - committed_rgn->expand_region(rec->addr(), - committed_rgn->addr() - rec->addr()); - } else { - // overlaps tail part - address committed_rgn_end = committed_rgn->addr() + - committed_rgn->size(); - assert(committed_rgn_end < rec->addr() + rec->size(), - "overlap tail part"); - committed_rgn->expand_region(committed_rgn_end, - (rec->addr() + rec->size()) - committed_rgn_end); - } - } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) { - // adjunct each other - committed_rgn->expand_region(rec->addr(), rec->size()); - VMMemRegion* next_reg = (VMMemRegion*)next(); - // see if we can consolidate next committed region - if (next_reg != NULL && next_reg->is_committed_region() && - next_reg->base() == committed_rgn->base() + committed_rgn->size()) { - committed_rgn->expand_region(next_reg->base(), next_reg->size()); - // delete merged region - remove(); - } - return true; - } else if (committed_rgn->base() > rec->addr()) { - // found the location, insert this committed region - return insert_record(rec); - } - committed_rgn = (VMMemRegion*)next(); - } - return insert_record(rec); -} - -bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) { - assert(rec->is_uncommit_record(), "sanity check"); - VMMemRegion* cur; - cur = (VMMemRegion*)current(); - assert(cur->is_reserved_region() && cur->contains_region(rec), - "Sanity check"); - // thread's native stack is always marked as "committed", ignore - // the "commit" operation for creating stack guard pages - if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack && - FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { - return true; - } - - cur = (VMMemRegion*)next(); - while (cur != NULL && cur->is_committed_region()) { - // region already uncommitted, must be due to duplicated record - if (cur->addr() >= rec->addr() + rec->size()) { - break; - } else if (cur->contains_region(rec)) { - // uncommit whole region - 
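Uncommit handling, which the next hunk completes, reduces to three interval cases: the uncommit record covers the whole committed region, trims one of its ends, or punches a hole in the middle and forces a split. A minimal standalone model of that arithmetic (plain C++ with illustrative names, not the HotSpot iterator code):

#include <cassert>
#include <cstddef>
#include <vector>

// One committed interval [base, base + size).
struct Region { size_t base; size_t size; };

// Remove [addr, addr + sz) from committed region 'cur'; the result has
// 0, 1 or 2 pieces: whole-region uncommit, end trim, or middle split.
static std::vector<Region> uncommit(const Region& cur, size_t addr, size_t sz) {
  assert(addr >= cur.base && addr + sz <= cur.base + cur.size);
  std::vector<Region> out;
  if (addr > cur.base) {                        // low end survives
    out.push_back(Region{cur.base, addr - cur.base});
  }
  size_t high = cur.base + cur.size;
  if (addr + sz < high) {                       // high end survives
    out.push_back(Region{addr + sz, high - (addr + sz)});
  }
  return out;                                   // empty: fully released
}

The deleted code below performs the same case split in place, inserting the surviving tail as a new record when the region is split.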
if (cur->is_same_region(rec)) { - remove(); - break; - } else if (rec->addr() == cur->addr() || - rec->addr() + rec->size() == cur->addr() + cur->size()) { - // uncommitted from either end of current memory region. - cur->exclude_region(rec->addr(), rec->size()); - break; - } else { // split the committed region and release the middle - address high_addr = cur->addr() + cur->size(); - size_t sz = high_addr - rec->addr(); - cur->exclude_region(rec->addr(), sz); - sz = high_addr - (rec->addr() + rec->size()); - if (MemTracker::track_callsite()) { - MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz, - ((VMMemRegionEx*)cur)->pc()); - return insert_record_after(&tmp); - } else { - MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz); - return insert_record_after(&tmp); - } - } - } - cur = (VMMemRegion*)next(); - } - - // we may not find committed record due to duplicated records - return true; -} - -bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) { - assert(rec->is_deallocation_record(), "Sanity check"); - VMMemRegion* cur = (VMMemRegion*)current(); - assert(cur->is_reserved_region() && cur->contains_region(rec), - "Sanity check"); - if (rec->is_same_region(cur)) { - - // In snapshot, the virtual memory records are sorted in following orders: - // 1. virtual memory's base address - // 2. virtual memory reservation record, followed by commit records within this reservation. - // The commit records are also in base address order. - // When a reserved region is released, we want to remove the reservation record and all - // commit records following it. -#ifdef ASSERT - address low_addr = cur->addr(); - address high_addr = low_addr + cur->size(); -#endif - // remove virtual memory reservation record - remove(); - // remove committed regions within above reservation - VMMemRegion* next_region = (VMMemRegion*)current(); - while (next_region != NULL && next_region->is_committed_region()) { - assert(next_region->addr() >= low_addr && - next_region->addr() + next_region->size() <= high_addr, - "Range check"); - remove(); - next_region = (VMMemRegion*)current(); - } - } else if (rec->addr() == cur->addr() || - rec->addr() + rec->size() == cur->addr() + cur->size()) { - // released region is at either end of this region - cur->exclude_region(rec->addr(), rec->size()); - assert(check_reserved_region(), "Integrity check"); - } else { // split the reserved region and release the middle - address high_addr = cur->addr() + cur->size(); - size_t sz = high_addr - rec->addr(); - cur->exclude_region(rec->addr(), sz); - sz = high_addr - rec->addr() - rec->size(); - if (MemTracker::track_callsite()) { - MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz, - ((VMMemRegionEx*)cur)->pc()); - bool ret = insert_reserved_region(&tmp); - assert(!ret || check_reserved_region(), "Integrity check"); - return ret; - } else { - MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz); - bool ret = insert_reserved_region(&tmp); - assert(!ret || check_reserved_region(), "Integrity check"); - return ret; - } - } - return true; -} - -bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) { - // skip all 'commit' records associated with previous reserved region - VMMemRegion* p = (VMMemRegion*)next(); - while (p != NULL && p->is_committed_region() && - p->base() + p->size() < rec->addr()) { - p = (VMMemRegion*)next(); - } - return insert_record(rec); -} - -bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address 
new_rgn_addr, size_t new_rgn_size) { - assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained"); - address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL); - if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region - size_t sz = rgn->size() - new_rgn_size; - // the original region becomes 'new' region - rgn->exclude_region(new_rgn_addr + new_rgn_size, sz); - // remaining becomes next region - MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc); - return insert_reserved_region(&next_rgn); - } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) { - rgn->exclude_region(new_rgn_addr, new_rgn_size); - MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc); - return insert_reserved_region(&next_rgn); - } else { - // the orginal region will be split into three - address rgn_high_addr = rgn->base() + rgn->size(); - // first region - rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr)); - // the second region is the new region - MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc); - if (!insert_reserved_region(&new_rgn)) return false; - // the remaining region - MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), - rgn_high_addr - (new_rgn_addr + new_rgn_size), pc); - return insert_reserved_region(&rem_rgn); - } -} - -static int sort_in_seq_order(const void* p1, const void* p2) { - assert(p1 != NULL && p2 != NULL, "Sanity check"); - const MemPointerRecord* mp1 = (MemPointerRecord*)p1; - const MemPointerRecord* mp2 = (MemPointerRecord*)p2; - return (mp1->seq() - mp2->seq()); -} - -bool StagingArea::init() { - if (MemTracker::track_callsite()) { - _malloc_data = new (std::nothrow)MemPointerArrayImpl(); - _vm_data = new (std::nothrow)MemPointerArrayImpl(); - } else { - _malloc_data = new (std::nothrow)MemPointerArrayImpl(); - _vm_data = new (std::nothrow)MemPointerArrayImpl(); - } - - if (_malloc_data != NULL && _vm_data != NULL && - !_malloc_data->out_of_memory() && - !_vm_data->out_of_memory()) { - return true; - } else { - if (_malloc_data != NULL) delete _malloc_data; - if (_vm_data != NULL) delete _vm_data; - _malloc_data = NULL; - _vm_data = NULL; - return false; - } -} - - -VMRecordIterator StagingArea::virtual_memory_record_walker() { - MemPointerArray* arr = vm_data(); - // sort into seq number order - arr->sort((FN_SORT)sort_in_seq_order); - return VMRecordIterator(arr); -} - - -MemSnapshot::MemSnapshot() { - if (MemTracker::track_callsite()) { - _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl(); - _vm_ptrs = new (std::nothrow)MemPointerArrayImpl(64, true); - } else { - _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl(); - _vm_ptrs = new (std::nothrow)MemPointerArrayImpl(64, true); - } - - _staging_area.init(); - _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock"); - NOT_PRODUCT(_untracked_count = 0;) - _number_of_classes = 0; -} - -MemSnapshot::~MemSnapshot() { - assert(MemTracker::shutdown_in_progress(), "native memory tracking still on"); - { - MutexLockerEx locker(_lock); - if (_alloc_ptrs != NULL) { - delete _alloc_ptrs; - _alloc_ptrs = NULL; - } - - if (_vm_ptrs != NULL) { - delete _vm_ptrs; - _vm_ptrs = NULL; - } - } - - if (_lock != NULL) { - delete _lock; - _lock = NULL; - } -} - - -void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) { - assert(dest != NULL && src != NULL, "Just check"); - assert(dest->addr() == src->addr(), 
"Just check"); - assert(dest->seq() > 0 && src->seq() > 0, "not sequenced"); - - if (MemTracker::track_callsite()) { - *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src; - } else { - *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src; - } -} - -void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) { - assert(src != NULL && dest != NULL, "Just check"); - assert(dest->seq() == 0 && src->seq() >0, "cast away sequence"); - - if (MemTracker::track_callsite()) { - *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src; - } else { - *(MemPointerRecord*)dest = *(MemPointerRecord*)src; - } -} - -// merge a recorder to the staging area -bool MemSnapshot::merge(MemRecorder* rec) { - assert(rec != NULL && !rec->out_of_memory(), "Just check"); - - SequencedRecordIterator itr(rec->pointer_itr()); - - MutexLockerEx lock(_lock, true); - MemPointerIterator malloc_staging_itr(_staging_area.malloc_data()); - MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current(); - MemPointerRecord* matched_rec; - - while (incoming_rec != NULL) { - if (incoming_rec->is_vm_pointer()) { - // we don't do anything with virtual memory records during merge - if (!_staging_area.vm_data()->append(incoming_rec)) { - return false; - } - } else { - // locate matched record and/or also position the iterator to proper - // location for this incoming record. - matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr()); - // we have not seen this memory block in this generation, - // so just add to staging area - if (matched_rec == NULL) { - if (!malloc_staging_itr.insert(incoming_rec)) { - return false; - } - } else if (incoming_rec->addr() == matched_rec->addr()) { - // whoever has higher sequence number wins - if (incoming_rec->seq() > matched_rec->seq()) { - copy_seq_pointer(matched_rec, incoming_rec); - } - } else if (incoming_rec->addr() < matched_rec->addr()) { - if (!malloc_staging_itr.insert(incoming_rec)) { - return false; - } - } else { - ShouldNotReachHere(); - } - } - incoming_rec = (MemPointerRecord*)itr.next(); - } - NOT_PRODUCT(void check_staging_data();) - return true; -} - - -// promote data to next generation -bool MemSnapshot::promote(int number_of_classes) { - assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check"); - assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL, - "Just check"); - MutexLockerEx lock(_lock, true); - - MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker(); - bool promoted = false; - if (promote_malloc_records(&malloc_itr)) { - VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker(); - if (promote_virtual_memory_records(&vm_itr)) { - promoted = true; - } - } - - NOT_PRODUCT(check_malloc_pointers();) - _staging_area.clear(); - _number_of_classes = number_of_classes; - return promoted; -} - -bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) { - MemPointerIterator malloc_snapshot_itr(_alloc_ptrs); - MemPointerRecord* new_rec = (MemPointerRecord*)itr->current(); - MemPointerRecord* matched_rec; - while (new_rec != NULL) { - matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr()); - // found matched memory block - if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) { - // snapshot already contains 'live' records - assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(), - "Sanity check"); - // update block states - if (new_rec->is_allocation_record()) { - 
assign_pointer(matched_rec, new_rec); - } else if (new_rec->is_arena_memory_record()) { - if (new_rec->size() == 0) { - // remove size record once size drops to 0 - malloc_snapshot_itr.remove(); - } else { - assign_pointer(matched_rec, new_rec); - } - } else { - // a deallocation record - assert(new_rec->is_deallocation_record(), "Sanity check"); - // an arena record can be followed by a size record, we need to remove both - if (matched_rec->is_arena_record()) { - MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next(); - if (next != NULL && next->is_arena_memory_record() && - next->is_memory_record_of_arena(matched_rec)) { - malloc_snapshot_itr.remove(); - } - } - // the memory is deallocated, remove related record(s) - malloc_snapshot_itr.remove(); - } - } else { - // don't insert size 0 record - if (new_rec->is_arena_memory_record() && new_rec->size() == 0) { - new_rec = NULL; - } - - if (new_rec != NULL) { - if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) { - if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) { - if (!malloc_snapshot_itr.insert_after(new_rec)) { - return false; - } - } else { - if (!malloc_snapshot_itr.insert(new_rec)) { - return false; - } - } - } -#ifndef PRODUCT - else if (!has_allocation_record(new_rec->addr())) { - // NMT can not track some startup memory, which is allocated before NMT is on - _untracked_count ++; - } -#endif - } - } - new_rec = (MemPointerRecord*)itr->next(); - } - return true; -} - -bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) { - VMMemPointerIterator vm_snapshot_itr(_vm_ptrs); - MemPointerRecord* new_rec = (MemPointerRecord*)itr->current(); - VMMemRegion* reserved_rec; - while (new_rec != NULL) { - assert(new_rec->is_vm_pointer(), "Sanity check"); - - // locate a reserved region that contains the specified address, or - // the nearest reserved region has base address just above the specified - // address - reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr()); - if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) { - // snapshot can only have 'live' records - assert(reserved_rec->is_reserved_region(), "Sanity check"); - if (new_rec->is_allocation_record()) { - if (!reserved_rec->is_same_region(new_rec)) { - // only deal with split a bigger reserved region into smaller regions. - // So far, CDS is the only use case. - if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) { - return false; - } - } - } else if (new_rec->is_uncommit_record()) { - if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) { - return false; - } - } else if (new_rec->is_commit_record()) { - // insert or expand existing committed region to cover this - // newly committed region - if (!vm_snapshot_itr.add_committed_region(new_rec)) { - return false; - } - } else if (new_rec->is_deallocation_record()) { - // release part or all memory region - if (!vm_snapshot_itr.remove_released_region(new_rec)) { - return false; - } - } else if (new_rec->is_type_tagging_record()) { - // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range - // to different type. 
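The assertion that opens the next hunk enforces the tag-once rule stated above: a reserved range may go from untyped to typed, but never from one type to another. The invariant in isolation (stand-in names for HotSpot's MEMFLAGS machinery):

#include <cassert>

// Stand-ins for a few of HotSpot's memory type flags.
enum MemType { mtNone, mtThreadStack, mtClass, mtCode };

struct ReservedRegion {
  MemType type;   // mtNone until the range is tagged

  // Re-tagging is only legal when it is a no-op.
  void tag(MemType t) {
    assert((type == mtNone || type == t) && "cannot re-tag to a different type");
    type = t;
  }
};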
- assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone || - FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()), - "Sanity check"); - reserved_rec->tag(new_rec->flags()); - } else { - ShouldNotReachHere(); - } - } else { - /* - * The assertion failure indicates mis-matched virtual memory records. The likely - * scenario is, that some virtual memory operations are not going through os::xxxx_memory() - * api, which have to be tracked manually. (perfMemory is an example). - */ - assert(new_rec->is_allocation_record(), "Sanity check"); - if (!vm_snapshot_itr.add_reserved_region(new_rec)) { - return false; - } - } - new_rec = (MemPointerRecord*)itr->next(); - } - return true; -} - -#ifndef PRODUCT -void MemSnapshot::print_snapshot_stats(outputStream* st) { - st->print_cr("Snapshot:"); - st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(), - (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K); - - st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(), - (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K); - - st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(), - _staging_area.malloc_data()->capacity(), - (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(), - _staging_area.malloc_data()->instance_size()/K); - - st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(), - _staging_area.vm_data()->capacity(), - (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(), - _staging_area.vm_data()->instance_size()/K); - - st->print_cr("\tUntracked allocation: %d", _untracked_count); -} - -void MemSnapshot::check_malloc_pointers() { - MemPointerArrayIteratorImpl mItr(_alloc_ptrs); - MemPointerRecord* p = (MemPointerRecord*)mItr.current(); - MemPointerRecord* prev = NULL; - while (p != NULL) { - if (prev != NULL) { - assert(p->addr() >= prev->addr(), "sorting order"); - } - prev = p; - p = (MemPointerRecord*)mItr.next(); - } -} - -bool MemSnapshot::has_allocation_record(address addr) { - MemPointerArrayIteratorImpl itr(_staging_area.malloc_data()); - MemPointerRecord* cur = (MemPointerRecord*)itr.current(); - while (cur != NULL) { - if (cur->addr() == addr && cur->is_allocation_record()) { - return true; - } - cur = (MemPointerRecord*)itr.next(); - } - return false; -} -#endif // PRODUCT - -#ifdef ASSERT -void MemSnapshot::check_staging_data() { - MemPointerArrayIteratorImpl itr(_staging_area.malloc_data()); - MemPointerRecord* cur = (MemPointerRecord*)itr.current(); - MemPointerRecord* next = (MemPointerRecord*)itr.next(); - while (next != NULL) { - assert((next->addr() > cur->addr()) || - ((next->flags() & MemPointerRecord::tag_masks) > - (cur->flags() & MemPointerRecord::tag_masks)), - "sorting order"); - cur = next; - next = (MemPointerRecord*)itr.next(); - } - - MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data()); - cur = (MemPointerRecord*)vm_itr.current(); - while (cur != NULL) { - assert(cur->is_vm_pointer(), "virtual memory pointer only"); - cur = (MemPointerRecord*)vm_itr.next(); - } -} - -void MemSnapshot::dump_all_vm_pointers() { - MemPointerArrayIteratorImpl itr(_vm_ptrs); - VMMemRegion* ptr = (VMMemRegion*)itr.current(); - tty->print_cr("dump virtual memory pointers:"); - while (ptr != NULL) { - 
if (ptr->is_committed_region()) { - tty->print("\t"); - } - tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(), - (ptr->addr() + ptr->size()), ptr->flags()); - - if (MemTracker::track_callsite()) { - VMMemRegionEx* ex = (VMMemRegionEx*)ptr; - if (ex->pc() != NULL) { - char buf[1024]; - if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) { - tty->print_cr("\t%s", buf); - } else { - tty->cr(); - } - } - } - - ptr = (VMMemRegion*)itr.next(); - } - tty->flush(); -} -#endif // ASSERT - --- ./hotspot/src/share/vm/services/memSnapshot.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,408 +0,0 @@ -/* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP -#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP - -#include "memory/allocation.hpp" -#include "runtime/mutex.hpp" -#include "runtime/mutexLocker.hpp" -#include "services/memBaseline.hpp" -#include "services/memPtrArray.hpp" - -// Snapshot pointer array iterator - -// The pointer array contains malloc-ed pointers -class MemPointerIterator : public MemPointerArrayIteratorImpl { - public: - MemPointerIterator(MemPointerArray* arr): - MemPointerArrayIteratorImpl(arr) { - assert(arr != NULL, "null array"); - } - -#ifdef ASSERT - virtual bool is_dup_pointer(const MemPointer* ptr1, - const MemPointer* ptr2) const { - MemPointerRecord* p1 = (MemPointerRecord*)ptr1; - MemPointerRecord* p2 = (MemPointerRecord*)ptr2; - - if (p1->addr() != p2->addr()) return false; - if ((p1->flags() & MemPointerRecord::tag_masks) != - (p2->flags() & MemPointerRecord::tag_masks)) { - return false; - } - // we do see multiple commit/uncommit on the same memory, it is ok - return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || - (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; - } - - virtual bool insert(MemPointer* ptr) { - if (_pos > 0) { - MemPointer* p1 = (MemPointer*)ptr; - MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); - assert(!is_dup_pointer(p1, p2), - err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); - } - if (_pos < _array->length() -1) { - MemPointer* p1 = (MemPointer*)ptr; - MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); - assert(!is_dup_pointer(p1, p2), - err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); - } - return _array->insert_at(ptr, _pos); - } - - virtual bool insert_after(MemPointer* ptr) { - 
if (_pos > 0) { - MemPointer* p1 = (MemPointer*)ptr; - MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); - assert(!is_dup_pointer(p1, p2), - err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); - } - if (_pos < _array->length() - 1) { - MemPointer* p1 = (MemPointer*)ptr; - MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); - - assert(!is_dup_pointer(p1, p2), - err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); - } - if (_array->insert_at(ptr, _pos + 1)) { - _pos ++; - return true; - } - return false; - } -#endif - - virtual MemPointer* locate(address addr) { - MemPointer* cur = current(); - while (cur != NULL && cur->addr() < addr) { - cur = next(); - } - return cur; - } -}; - -class VMMemPointerIterator : public MemPointerIterator { - public: - VMMemPointerIterator(MemPointerArray* arr): - MemPointerIterator(arr) { - } - - // locate an existing reserved memory region that contains specified address, - // or the reserved region just above this address, where the incoming - // reserved region should be inserted. - virtual MemPointer* locate(address addr) { - reset(); - VMMemRegion* reg = (VMMemRegion*)current(); - while (reg != NULL) { - if (reg->is_reserved_region()) { - if (reg->contains_address(addr) || addr < reg->base()) { - return reg; - } - } - reg = (VMMemRegion*)next(); - } - return NULL; - } - - // following methods update virtual memory in the context - // of 'current' position, which is properly positioned by - // callers via locate method. - bool add_reserved_region(MemPointerRecord* rec); - bool add_committed_region(MemPointerRecord* rec); - bool remove_uncommitted_region(MemPointerRecord* rec); - bool remove_released_region(MemPointerRecord* rec); - - // split a reserved region to create a new memory region with specified base and size - bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size); - private: - bool insert_record(MemPointerRecord* rec); - bool insert_record_after(MemPointerRecord* rec); - - bool insert_reserved_region(MemPointerRecord* rec); - - // reset current position - inline void reset() { _pos = 0; } -#ifdef ASSERT - // check integrity of records on current reserved memory region. - bool check_reserved_region() { - VMMemRegion* reserved_region = (VMMemRegion*)current(); - assert(reserved_region != NULL && reserved_region->is_reserved_region(), - "Sanity check"); - // all committed regions that follow current reserved region, should all - // belong to the reserved region. 
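The integrity check described in the comment above is pure interval containment: every committed region that follows a reservation in the sorted array must lie inside that reservation. As a standalone predicate (illustrative types; the deleted loop below expresses the same walk through the iterator's cursor):

#include <cstddef>
#include <vector>

struct Region { size_t base; size_t size; bool committed; };

// regions[i] is a reservation; committed entries that directly follow it
// in the sorted array must fall entirely within [base, base + size).
static bool check_reserved(const std::vector<Region>& regions, size_t i) {
  size_t low  = regions[i].base;
  size_t high = regions[i].base + regions[i].size;
  for (size_t j = i + 1; j < regions.size() && regions[j].committed; j++) {
    if (regions[j].base < low || regions[j].base + regions[j].size > high) {
      return false;
    }
  }
  return true;
}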
- VMMemRegion* next_region = (VMMemRegion*)next(); - for (; next_region != NULL && next_region->is_committed_region(); - next_region = (VMMemRegion*)next() ) { - if(!reserved_region->contains_region(next_region)) { - return false; - } - } - return true; - } - - virtual bool is_dup_pointer(const MemPointer* ptr1, - const MemPointer* ptr2) const { - VMMemRegion* p1 = (VMMemRegion*)ptr1; - VMMemRegion* p2 = (VMMemRegion*)ptr2; - - if (p1->addr() != p2->addr()) return false; - if ((p1->flags() & MemPointerRecord::tag_masks) != - (p2->flags() & MemPointerRecord::tag_masks)) { - return false; - } - // we do see multiple commit/uncommit on the same memory, it is ok - return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || - (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; - } -#endif -}; - -class MallocRecordIterator : public MemPointerArrayIterator { - private: - MemPointerArrayIteratorImpl _itr; - - - - public: - MallocRecordIterator(MemPointerArray* arr) : _itr(arr) { - } - - virtual MemPointer* current() const { -#ifdef ASSERT - MemPointer* cur_rec = _itr.current(); - if (cur_rec != NULL) { - MemPointer* prev_rec = _itr.peek_prev(); - MemPointer* next_rec = _itr.peek_next(); - assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order"); - assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order"); - } -#endif - return _itr.current(); - } - virtual MemPointer* next() { - MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next(); - // arena memory record is a special case, which we have to compare - // sequence number against its associated arena record. - if (next_rec != NULL && next_rec->is_arena_memory_record()) { - MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev(); - // if there is an associated arena record, it has to be previous - // record because of sorting order (by address) - NMT generates a pseudo address - // for arena's size record by offsetting arena's address, that guarantees - // the order of arena record and it's size record. - if (prev_rec != NULL && prev_rec->is_arena_record() && - next_rec->is_memory_record_of_arena(prev_rec)) { - if (prev_rec->seq() > next_rec->seq()) { - // Skip this arena memory record - // Two scenarios: - // - if the arena record is an allocation record, this early - // size record must be leftover by previous arena, - // and the last size record should have size = 0. - // - if the arena record is a deallocation record, this - // size record should be its cleanup record, which should - // also have size = 0. In other world, arena alway reset - // its size before gone (see Arena's destructor) - assert(next_rec->size() == 0, "size not reset"); - return _itr.next(); - } else { - assert(prev_rec->is_allocation_record(), - "Arena size record ahead of allocation record"); - } - } - } - return next_rec; - } - - MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; } - MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; } - void remove() { ShouldNotReachHere(); } - bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; } - bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; } -}; - -// collapse duplicated records. Eliminating duplicated records here, is much -// cheaper than during promotion phase. However, it does have limitation - it -// can only eliminate duplicated records within the generation, there are -// still chances seeing duplicated records during promotion. 
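Since each staging array arrives pre-sorted by sequence number, collapsing duplicates amounts to keeping only the last record of any run with identical (addr, size, flags). A compact standalone version of that skip loop (plain C++; the VMRecordIterator below does the same thing incrementally through peek_next()):

#include <cstddef>
#include <vector>

struct Rec { size_t addr; size_t size; unsigned flags; unsigned seq; };

static bool same_record(const Rec& a, const Rec& b) {
  return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
}

// Input sorted by seq; a run of duplicates collapses to its last entry,
// which carries the most accurate callsite pc.
static std::vector<Rec> collapse(const std::vector<Rec>& in) {
  std::vector<Rec> out;
  for (size_t i = 0; i < in.size(); i++) {
    if (i + 1 < in.size() && same_record(in[i], in[i + 1])) {
      continue;   // skip; the higher-seq duplicate follows
    }
    out.push_back(in[i]);
  }
  return out;
}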
-// We want to use the record with higher sequence number, because it has -// more accurate callsite pc. -class VMRecordIterator : public MemPointerArrayIterator { - private: - MemPointerArrayIteratorImpl _itr; - - public: - VMRecordIterator(MemPointerArray* arr) : _itr(arr) { - MemPointerRecord* cur = (MemPointerRecord*)_itr.current(); - MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next(); - while (next != NULL) { - assert(cur != NULL, "Sanity check"); - assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(), - "pre-sort order"); - - if (is_duplicated_record(cur, next)) { - _itr.next(); - next = (MemPointerRecord*)_itr.peek_next(); - } else { - break; - } - } - } - - virtual MemPointer* current() const { - return _itr.current(); - } - - // get next record, but skip the duplicated records - virtual MemPointer* next() { - MemPointerRecord* cur = (MemPointerRecord*)_itr.next(); - MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next(); - while (next != NULL) { - assert(cur != NULL, "Sanity check"); - assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(), - "pre-sort order"); - - if (is_duplicated_record(cur, next)) { - _itr.next(); - cur = next; - next = (MemPointerRecord*)_itr.peek_next(); - } else { - break; - } - } - return cur; - } - - MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; } - MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; } - void remove() { ShouldNotReachHere(); } - bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; } - bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; } - - private: - bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const { - bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags()); - assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record"); - return ret; - } -}; - -class StagingArea VALUE_OBJ_CLASS_SPEC { - private: - MemPointerArray* _malloc_data; - MemPointerArray* _vm_data; - - public: - StagingArea() : _malloc_data(NULL), _vm_data(NULL) { - init(); - } - - ~StagingArea() { - if (_malloc_data != NULL) delete _malloc_data; - if (_vm_data != NULL) delete _vm_data; - } - - MallocRecordIterator malloc_record_walker() { - return MallocRecordIterator(malloc_data()); - } - - VMRecordIterator virtual_memory_record_walker(); - - bool init(); - void clear() { - assert(_malloc_data != NULL && _vm_data != NULL, "Just check"); - _malloc_data->shrink(); - _malloc_data->clear(); - _vm_data->clear(); - } - - inline MemPointerArray* malloc_data() { return _malloc_data; } - inline MemPointerArray* vm_data() { return _vm_data; } -}; - -class MemBaseline; -class MemSnapshot : public CHeapObj { - private: - // the following two arrays contain records of all known lived memory blocks - // live malloc-ed memory pointers - MemPointerArray* _alloc_ptrs; - // live virtual memory pointers - MemPointerArray* _vm_ptrs; - - StagingArea _staging_area; - - // the lock to protect this snapshot - Monitor* _lock; - - // the number of instance classes - int _number_of_classes; - - NOT_PRODUCT(size_t _untracked_count;) - friend class MemBaseline; - - public: - MemSnapshot(); - virtual ~MemSnapshot(); - - // if we are running out of native memory - bool out_of_memory() { - return (_alloc_ptrs == NULL || - _staging_area.malloc_data() == NULL || - _staging_area.vm_data() == NULL || - _vm_ptrs == NULL || _lock == NULL || - _alloc_ptrs->out_of_memory() || - 
_vm_ptrs->out_of_memory()); - } - - // merge a per-thread memory recorder into staging area - bool merge(MemRecorder* rec); - // promote staged data to snapshot - bool promote(int number_of_classes); - - int number_of_classes() const { return _number_of_classes; } - - void wait(long timeout) { - assert(_lock != NULL, "Just check"); - MonitorLockerEx locker(_lock); - locker.wait(true, timeout); - } - - NOT_PRODUCT(void print_snapshot_stats(outputStream* st);) - NOT_PRODUCT(void check_staging_data();) - NOT_PRODUCT(void check_malloc_pointers();) - NOT_PRODUCT(bool has_allocation_record(address addr);) - // dump all virtual memory pointers in snapshot - DEBUG_ONLY( void dump_all_vm_pointers();) - - private: - // copy sequenced pointer from src to dest - void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src); - // assign a sequenced pointer to non-sequenced pointer - void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src); - - bool promote_malloc_records(MemPointerArrayIterator* itr); - bool promote_virtual_memory_records(MemPointerArrayIterator* itr); -}; - -#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP --- ./hotspot/src/share/vm/services/memTrackWorker.cpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "runtime/threadCritical.hpp" -#include "services/memTracker.hpp" -#include "services/memTrackWorker.hpp" -#include "utilities/decoder.hpp" -#include "utilities/vmError.hpp" - - -void GenerationData::reset() { - _number_of_classes = 0; - while (_recorder_list != NULL) { - MemRecorder* tmp = _recorder_list; - _recorder_list = _recorder_list->next(); - MemTracker::release_thread_recorder(tmp); - } -} - -MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) { - // create thread uses cgc thread type for now. We should revisit - // the option, or create new thread type. 
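The worker thread drains generations from a fixed-size circular buffer: each NMT sync point fills the slot at _tail, the worker consumes from _head, and the buffer is full when advancing _tail would collide with _head. A standalone model of that structure, including the occupancy arithmetic used by generations_in_use() further down (template and member names are illustrative):

template <typename T, int N>
class CircularQueue {
  T   _slot[N];
  int _head;   // oldest generation, drained by the worker thread
  int _tail;   // current generation, filled at sync points
 public:
  CircularQueue() : _head(0), _tail(0) { }
  T&   current() { return _slot[_tail]; }
  T&   oldest()  { return _slot[_head]; }
  bool advance_tail() {                 // close a generation; false when full
    int next = (_tail + 1) % N;
    if (next == _head) return false;
    _tail = next;
    return true;
  }
  void advance_head() { _head = (_head + 1) % N; }
  int  in_use() const {
    return _tail >= _head ? (_tail - _head + 1) : (N - (_head - _tail) + 1);
  }
};

The constructor that follows initializes the same head/tail pair and the generation slots.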
- _has_error = !os::create_thread(this, os::cgc_thread); - set_name("MemTrackWorker"); - - // initial generation circuit buffer - if (!has_error()) { - _head = _tail = 0; - for(int index = 0; index < MAX_GENERATIONS; index ++) { - ::new ((void*)&_gen[index]) GenerationData(); - } - } - NOT_PRODUCT(_sync_point_count = 0;) - NOT_PRODUCT(_merge_count = 0;) - NOT_PRODUCT(_last_gen_in_use = 0;) -} - -MemTrackWorker::~MemTrackWorker() { - for (int index = 0; index < MAX_GENERATIONS; index ++) { - _gen[index].reset(); - } -} - -void* MemTrackWorker::operator new(size_t size) throw() { - assert(false, "use nothrow version"); - return NULL; -} - -void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { - return allocate(size, false, mtNMT); -} - -void MemTrackWorker::start() { - os::start_thread(this); -} - -/* - * Native memory tracking worker thread loop: - * 1. merge one generation of memory recorders to staging area - * 2. promote staging data to memory snapshot - * - * This thread can run through safepoint. - */ - -void MemTrackWorker::run() { - assert(MemTracker::is_on(), "native memory tracking is off"); - this->initialize_thread_local_storage(); - this->record_stack_base_and_size(); - assert(_snapshot != NULL, "Worker should not be started"); - MemRecorder* rec; - unsigned long processing_generation = 0; - bool worker_idle = false; - - while (!MemTracker::shutdown_in_progress()) { - NOT_PRODUCT(_last_gen_in_use = generations_in_use();) - { - // take a recorder from earliest generation in buffer - ThreadCritical tc; - rec = _gen[_head].next_recorder(); - } - if (rec != NULL) { - if (rec->get_generation() != processing_generation || worker_idle) { - processing_generation = rec->get_generation(); - worker_idle = false; - MemTracker::set_current_processing_generation(processing_generation); - } - - // merge the recorder into staging area - if (!_snapshot->merge(rec)) { - MemTracker::shutdown(MemTracker::NMT_out_of_memory); - } else { - NOT_PRODUCT(_merge_count ++;) - } - MemTracker::release_thread_recorder(rec); - } else { - // no more recorder to merge, promote staging area - // to snapshot - if (_head != _tail) { - long number_of_classes; - { - ThreadCritical tc; - if (_gen[_head].has_more_recorder() || _head == _tail) { - continue; - } - number_of_classes = _gen[_head].number_of_classes(); - _gen[_head].reset(); - - // done with this generation, increment _head pointer - _head = (_head + 1) % MAX_GENERATIONS; - } - // promote this generation data to snapshot - if (!_snapshot->promote(number_of_classes)) { - // failed to promote, means out of memory - MemTracker::shutdown(MemTracker::NMT_out_of_memory); - } - } else { - // worker thread is idle - worker_idle = true; - MemTracker::report_worker_idle(); - _snapshot->wait(1000); - ThreadCritical tc; - // check if more data arrived - if (!_gen[_head].has_more_recorder()) { - _gen[_head].add_recorders(MemTracker::get_pending_recorders()); - } - } - } - } - assert(MemTracker::shutdown_in_progress(), "just check"); - - // transits to final shutdown - MemTracker::final_shutdown(); -} - -// at synchronization point, where 'safepoint visible' Java threads are blocked -// at a safepoint, and the rest of threads are blocked on ThreadCritical lock. -// The caller MemTracker::sync() already takes ThreadCritical before calling this -// method. -// -// Following tasks are performed: -// 1. add all recorders in pending queue to current generation -// 2. 
increase generation - -void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) { - NOT_PRODUCT(_sync_point_count ++;) - assert(count_recorder(rec) <= MemRecorder::_instance_count, - "pending queue has infinite loop"); - - bool out_of_generation_buffer = false; - // check shutdown state inside ThreadCritical - if (MemTracker::shutdown_in_progress()) return; - - _gen[_tail].set_number_of_classes(number_of_classes); - // append the recorders to the end of the generation - _gen[_tail].add_recorders(rec); - assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count, - "after add to current generation has infinite loop"); - // we have collected all recorders for this generation. If there is data, - // we need to increment _tail to start a new generation. - if (_gen[_tail].has_more_recorder() || _head == _tail) { - _tail = (_tail + 1) % MAX_GENERATIONS; - out_of_generation_buffer = (_tail == _head); - } - - if (out_of_generation_buffer) { - MemTracker::shutdown(MemTracker::NMT_out_of_generation); - } -} - -#ifndef PRODUCT -int MemTrackWorker::count_recorder(const MemRecorder* head) { - int count = 0; - while(head != NULL) { - count ++; - head = head->next(); - } - return count; -} - -int MemTrackWorker::count_pending_recorders() const { - int count = 0; - for (int index = 0; index < MAX_GENERATIONS; index ++) { - MemRecorder* head = _gen[index].peek(); - if (head != NULL) { - count += count_recorder(head); - } - } - return count; -} -#endif --- ./hotspot/src/share/vm/services/memTrackWorker.hpp Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP -#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP - -#include "memory/allocation.hpp" -#include "runtime/thread.hpp" -#include "services/memRecorder.hpp" - -// Maximum MAX_GENERATIONS generation data can be tracked. 
-#define MAX_GENERATIONS 512 - -class GenerationData VALUE_OBJ_CLASS_SPEC { - private: - int _number_of_classes; - MemRecorder* _recorder_list; - - public: - GenerationData(): _number_of_classes(0), _recorder_list(NULL) { } - - inline int number_of_classes() const { return _number_of_classes; } - inline void set_number_of_classes(long num) { _number_of_classes = num; } - - inline MemRecorder* next_recorder() { - if (_recorder_list == NULL) { - return NULL; - } else { - MemRecorder* tmp = _recorder_list; - _recorder_list = _recorder_list->next(); - return tmp; - } - } - - inline bool has_more_recorder() const { - return (_recorder_list != NULL); - } - - // add recorders to this generation - void add_recorders(MemRecorder* head) { - if (head != NULL) { - if (_recorder_list == NULL) { - _recorder_list = head; - } else { - MemRecorder* tmp = _recorder_list; - for (; tmp->next() != NULL; tmp = tmp->next()); - tmp->set_next(head); - } - } - } - - void reset(); - - NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; }) -}; - -class MemTrackWorker : public NamedThread { - private: - // circular buffer. This buffer contains generation data to be merged into global - // snaphsot. - // Each slot holds a generation - GenerationData _gen[MAX_GENERATIONS]; - int _head, _tail; // head and tail pointers to above circular buffer - - bool _has_error; - - MemSnapshot* _snapshot; - - public: - MemTrackWorker(MemSnapshot* snapshot); - ~MemTrackWorker(); - _NOINLINE_ void* operator new(size_t size) throw(); - _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw(); - - void start(); - void run(); - - inline bool has_error() const { return _has_error; } - - // task at synchronization point - void at_sync_point(MemRecorder* pending_recorders, int number_of_classes); - - // for debugging purpose, they are not thread safe. - NOT_PRODUCT(static int count_recorder(const MemRecorder* head);) - NOT_PRODUCT(int count_pending_recorders() const;) - - NOT_PRODUCT(int _sync_point_count;) - NOT_PRODUCT(int _merge_count;) - NOT_PRODUCT(int _last_gen_in_use;) - - // how many generations are queued - inline int generations_in_use() const { - return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1)); - } -}; - -#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP --- ./hotspot/src/share/vm/services/memTracker.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memTracker.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,861 +23,304 @@ */ #include "precompiled.hpp" -#include "oops/instanceKlass.hpp" -#include "runtime/atomic.hpp" -#include "runtime/interfaceSupport.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/threadCritical.hpp" -#include "runtime/vm_operations.hpp" -#include "services/memPtr.hpp" +#include "runtime/mutex.hpp" +#include "services/memBaseline.hpp" #include "services/memReporter.hpp" +#include "services/mallocTracker.inline.hpp" #include "services/memTracker.hpp" -#include "utilities/decoder.hpp" #include "utilities/defaultStream.hpp" -#include "utilities/globalDefinitions.hpp" -bool NMT_track_callsite = false; +#ifdef SOLARIS + volatile bool NMT_stack_walkable = false; +#else + volatile bool NMT_stack_walkable = true; +#endif -// walk all 'known' threads at NMT sync point, and collect their recorders -void SyncThreadRecorderClosure::do_thread(Thread* thread) { - assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); - if (thread->is_Java_thread()) { - JavaThread* javaThread = (JavaThread*)thread; - MemRecorder* recorder = javaThread->get_recorder(); - if (recorder != NULL) { - MemTracker::enqueue_pending_recorder(recorder); - javaThread->set_recorder(NULL); +volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown; +NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown; + +MemBaseline MemTracker::_baseline; +Mutex* MemTracker::_query_lock = NULL; +bool MemTracker::_is_nmt_env_valid = true; + + +NMT_TrackingLevel MemTracker::init_tracking_level() { + NMT_TrackingLevel level = NMT_off; + char buf[64]; + char nmt_option[64]; + jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id()); + if (os::getenv(buf, nmt_option, sizeof(nmt_option))) { + if (strcmp(nmt_option, "summary") == 0) { + level = NMT_summary; + } else if (strcmp(nmt_option, "detail") == 0) { +#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED + level = NMT_detail; +#else + level = NMT_summary; +#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED + } else if (strcmp(nmt_option, "off") != 0) { + // The option value is invalid + _is_nmt_env_valid = false; } + + // Remove the environment variable to avoid leaking to child processes + os::unsetenv(buf); } - _thread_count ++; + + // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice, + // but it is benign, the results are the same. 
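In the replacement implementation the launcher hands the tracking level to the VM through a per-process environment variable, which the VM parses once and then removes so child processes do not inherit it. A minimal standalone model of that handshake (the variable name format follows the code above; the helper name is illustrative):

#include <cstdio>
#include <cstdlib>
#include <cstring>

enum TrackingLevel { NMT_off, NMT_summary, NMT_detail };

static TrackingLevel read_nmt_level(int pid, bool* valid) {
  char key[64];
  snprintf(key, sizeof(key), "NMT_LEVEL_%d", pid);
  *valid = true;
  const char* v = getenv(key);
  // (the real code also unsets the variable so children do not inherit it)
  if (v == NULL || strcmp(v, "off") == 0) return NMT_off;
  if (strcmp(v, "summary") == 0) return NMT_summary;
  if (strcmp(v, "detail") == 0)  return NMT_detail;
  *valid = false;        // unrecognized value, flagged invalid
  return NMT_off;
}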
+ ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false); + + if (!MallocTracker::initialize(level) || + !VirtualMemoryTracker::initialize(level)) { + level = NMT_off; + } + return level; } - -MemRecorder* volatile MemTracker::_global_recorder = NULL; -MemSnapshot* MemTracker::_snapshot = NULL; -MemBaseline MemTracker::_baseline; -Mutex* MemTracker::_query_lock = NULL; -MemRecorder* volatile MemTracker::_merge_pending_queue = NULL; -MemRecorder* volatile MemTracker::_pooled_recorders = NULL; -MemTrackWorker* MemTracker::_worker_thread = NULL; -int MemTracker::_sync_point_skip_count = 0; -MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off; -volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited; -MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none; -int MemTracker::_thread_count = 255; -volatile jint MemTracker::_pooled_recorder_count = 0; -volatile unsigned long MemTracker::_processing_generation = 0; -volatile bool MemTracker::_worker_thread_idle = false; -volatile jint MemTracker::_pending_op_count = 0; -volatile bool MemTracker::_slowdown_calling_thread = false; -debug_only(intx MemTracker::_main_thread_tid = 0;) -NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;) - -void MemTracker::init_tracking_options(const char* option_line) { - _tracking_level = NMT_off; - if (strcmp(option_line, "=summary") == 0) { - _tracking_level = NMT_summary; - } else if (strcmp(option_line, "=detail") == 0) { - // detail relies on a stack-walking ability that may not - // be available depending on platform and/or compiler flags -#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED - _tracking_level = NMT_detail; -#else - jio_fprintf(defaultStream::error_stream(), - "NMT detail is not supported on this platform. Using NMT summary instead.\n"); - _tracking_level = NMT_summary; -#endif - } else if (strcmp(option_line, "=off") != 0) { - vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL); - } -} - -// first phase of bootstrapping, when VM is still in single-threaded mode. -void MemTracker::bootstrap_single_thread() { - if (_tracking_level > NMT_off) { - assert(_state == NMT_uninited, "wrong state"); - - // NMT is not supported with UseMallocOnly is on. NMT can NOT - // handle the amount of malloc data without significantly impacting - // runtime performance when this flag is on. - if (UseMallocOnly) { - shutdown(NMT_use_malloc_only); +void MemTracker::init() { + NMT_TrackingLevel level = tracking_level(); + if (level >= NMT_summary) { + if (!VirtualMemoryTracker::late_initialize(level)) { + shutdown(); return; } - _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock"); + // Already OOM. It is unlikely, but still have to handle it. if (_query_lock == NULL) { - shutdown(NMT_out_of_memory); - return; - } - - debug_only(_main_thread_tid = os::current_thread_id();) - _state = NMT_bootstrapping_single_thread; - NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); - } -} - -// second phase of bootstrapping, when VM is about to or already entered multi-theaded mode. 
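For contrast, the deleted implementation drove NMT through an explicit bootstrap/shutdown state machine; the multi-phase functions removed below each perform one transition. A compact model of the legal transitions (state names follow the deleted code):

enum NMTState {
  NMT_uninited,
  NMT_bootstrapping_single_thread,
  NMT_bootstrapping_multi_thread,
  NMT_started,
  NMT_shutdown_pending,
  NMT_final_shutdown
};

static bool legal_transition(NMTState from, NMTState to) {
  switch (to) {
    case NMT_bootstrapping_single_thread: return from == NMT_uninited;
    case NMT_bootstrapping_multi_thread:  return from == NMT_bootstrapping_single_thread;
    case NMT_started:                     return from == NMT_bootstrapping_multi_thread;
    case NMT_shutdown_pending:            return from <= NMT_started;  // any live state
    case NMT_final_shutdown:              return from == NMT_shutdown_pending;
    default:                              return false;
  }
}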
-void MemTracker::bootstrap_multi_thread() { - if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) { - // create nmt lock for multi-thread execution - assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); - _state = NMT_bootstrapping_multi_thread; - NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); - } -} - -// fully start nmt -void MemTracker::start() { - // Native memory tracking is off from command line option - if (_tracking_level == NMT_off || shutdown_in_progress()) return; - - assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); - assert(_state == NMT_bootstrapping_multi_thread, "wrong state"); - - _snapshot = new (std::nothrow)MemSnapshot(); - if (_snapshot != NULL) { - if (!_snapshot->out_of_memory() && start_worker(_snapshot)) { - _state = NMT_started; - NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); - return; - } - - delete _snapshot; - _snapshot = NULL; - } - - // fail to start native memory tracking, shut it down - shutdown(NMT_initialization); -} - -/** - * Shutting down native memory tracking. - * We can not shutdown native memory tracking immediately, so we just - * setup shutdown pending flag, every native memory tracking component - * should orderly shut itself down. - * - * The shutdown sequences: - * 1. MemTracker::shutdown() sets MemTracker to shutdown pending state - * 2. Worker thread calls MemTracker::final_shutdown(), which transites - * MemTracker to final shutdown state. - * 3. At sync point, MemTracker does final cleanup, before sets memory - * tracking level to off to complete shutdown. - */ -void MemTracker::shutdown(ShutdownReason reason) { - if (_tracking_level == NMT_off) return; - - if (_state <= NMT_bootstrapping_single_thread) { - // we still in single thread mode, there is not contention - _state = NMT_shutdown_pending; - _reason = reason; - } else { - // we want to know who initialized shutdown - if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending, - (jint*)&_state, (jint)NMT_started)) { - _reason = reason; + shutdown(); } } } -// final phase of shutdown -void MemTracker::final_shutdown() { - // delete all pending recorders and pooled recorders - delete_all_pending_recorders(); - delete_all_pooled_recorders(); - - { - // shared baseline and snapshot are the only objects needed to - // create query results - MutexLockerEx locker(_query_lock, true); - // cleanup baseline data and snapshot - _baseline.clear(); - delete _snapshot; - _snapshot = NULL; +bool MemTracker::check_launcher_nmt_support(const char* value) { + if (strcmp(value, "=detail") == 0) { +#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED + jio_fprintf(defaultStream::error_stream(), + "NMT detail is not supported on this platform. Using NMT summary instead.\n"); + if (MemTracker::tracking_level() != NMT_summary) { + return false; + } +#else + if (MemTracker::tracking_level() != NMT_detail) { + return false; + } +#endif + } else if (strcmp(value, "=summary") == 0) { + if (MemTracker::tracking_level() != NMT_summary) { + return false; + } + } else if (strcmp(value, "=off") == 0) { + if (MemTracker::tracking_level() != NMT_off) { + return false; + } + } else { + _is_nmt_env_valid = false; } - // shutdown shared decoder instance, since it is only - // used by native memory tracking so far. 
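The deleted shutdown() above serializes competing shutdown requests with a compare-and-swap on the tracker state, so exactly one thread gets to record the shutdown reason. The same idea expressed with std::atomic in place of HotSpot's Atomic::cmpxchg (a sketch, not VM code):

#include <atomic>

enum State { STARTED, SHUTDOWN_PENDING };

static std::atomic<int> g_state(STARTED);
static int g_reason = 0;

static void request_shutdown(int reason) {
  int expected = STARTED;
  // Only the first caller wins the CAS and owns the shutdown reason.
  if (g_state.compare_exchange_strong(expected, SHUTDOWN_PENDING)) {
    g_reason = reason;
  }
}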
- Decoder::shutdown(); - - MemTrackWorker* worker = NULL; - { - ThreadCritical tc; - // can not delete worker inside the thread critical - if (_worker_thread != NULL && Thread::current() == _worker_thread) { - worker = _worker_thread; - _worker_thread = NULL; - } - } - if (worker != NULL) { - delete worker; - } - _state = NMT_final_shutdown; + return true; } -// delete all pooled recorders -void MemTracker::delete_all_pooled_recorders() { - // free all pooled recorders - MemRecorder* volatile cur_head = _pooled_recorders; - if (cur_head != NULL) { - MemRecorder* null_ptr = NULL; - while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, - (void*)&_pooled_recorders, (void*)cur_head)) { - cur_head = _pooled_recorders; - } - if (cur_head != NULL) { - delete cur_head; - _pooled_recorder_count = 0; +bool MemTracker::verify_nmt_option() { + return _is_nmt_env_valid; +} + +void* MemTracker::malloc_base(void* memblock) { + return MallocTracker::get_base(memblock); +} + +void Tracker::record(address addr, size_t size) { + if (MemTracker::tracking_level() < NMT_summary) return; + switch(_type) { + case uncommit: + VirtualMemoryTracker::remove_uncommitted_region(addr, size); + break; + case release: + VirtualMemoryTracker::remove_released_region(addr, size); + break; + default: + ShouldNotReachHere(); + } +} + + +// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock +void MemTracker::shutdown() { + // We can only shutdown NMT to minimal tracking level if it is ever on. + if (tracking_level () > NMT_minimal) { + transition_to(NMT_minimal); + } +} + +bool MemTracker::transition_to(NMT_TrackingLevel level) { + NMT_TrackingLevel current_level = tracking_level(); + + assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off"); + + if (current_level == level) { + return true; + } else if (current_level > level) { + // Downgrade tracking level, we want to lower the tracking level first + _tracking_level = level; + // Make _tracking_level visible immediately. + OrderAccess::fence(); + VirtualMemoryTracker::transition(current_level, level); + MallocTracker::transition(current_level, level); + } else { + // Upgrading tracking level is not supported and has never been supported. + // Allocating and deallocating malloc tracking structures is not thread safe and + // leads to inconsistencies unless a lot coarser locks are added. + } + return true; +} + +void MemTracker::report(bool summary_only, outputStream* output) { + assert(output != NULL, "No output stream"); + MemBaseline baseline; + if (baseline.baseline(summary_only)) { + if (summary_only) { + MemSummaryReporter rpt(baseline, output); + rpt.report(); + } else { + MemDetailReporter rpt(baseline, output); + rpt.report(); } } } -// delete all recorders in pending queue -void MemTracker::delete_all_pending_recorders() { - // free all pending recorders - MemRecorder* pending_head = get_pending_recorders(); - if (pending_head != NULL) { - delete pending_head; - } -} +// This is a walker to gather malloc site hashtable statistics, +// the result is used for tuning. +class StatisticsWalker : public MallocSiteWalker { + private: + enum Threshold { + // aggregates statistics over this threshold into one + // line item. + report_threshold = 20 + }; -/* - * retrieve per-thread recorder of specified thread. 
- * if thread == NULL, it means global recorder
- */
-MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
-  if (shutdown_in_progress()) return NULL;
+ private:
+  // Number of allocation sites that have all memory freed
+  int   _empty_entries;
+  // Total number of allocation sites, including empty sites
+  int   _total_entries;
+  // Distribution of captured call stack depths
+  int   _stack_depth_distribution[NMT_TrackingStackDepth];
+  // Hash distribution
+  int   _hash_distribution[report_threshold];
+  // Number of hash buckets that have entries over the threshold
+  int   _bucket_over_threshold;
-  MemRecorder* rc;
-  if (thread == NULL) {
-    rc = _global_recorder;
-  } else {
-    rc = thread->get_recorder();
+  // The hash bucket that the walker is currently walking
+  int   _current_hash_bucket;
+  // The length of the current hash bucket
+  int   _current_bucket_length;
+  // Number of hash buckets that are not empty
+  int   _used_buckets;
+  // Longest hash bucket length
+  int   _longest_bucket_length;
+
+ public:
+  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
+    int index = 0;
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      _stack_depth_distribution[index] = 0;
+    }
+    for (index = 0; index < report_threshold; index ++) {
+      _hash_distribution[index] = 0;
+    }
+    _bucket_over_threshold = 0;
+    _longest_bucket_length = 0;
+    _current_hash_bucket = -1;
+    _current_bucket_length = 0;
+    _used_buckets = 0;
   }
-  if (rc != NULL && rc->is_full()) {
-    enqueue_pending_recorder(rc);
-    rc = NULL;
+  virtual bool at(const MallocSite* e) {
+    if (e->size() == 0) _empty_entries ++;
+    _total_entries ++;
+
+    // stack depth distribution
+    int frames = e->call_stack()->frames();
+    _stack_depth_distribution[frames - 1] ++;
+
+    // hash distribution
+    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
+    if (_current_hash_bucket == -1) {
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    } else if (_current_hash_bucket == hash_bucket) {
+      _current_bucket_length ++;
+    } else {
+      record_bucket_length(_current_bucket_length);
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    }
+    return true;
   }
-  if (rc == NULL) {
-    rc = get_new_or_pooled_instance();
-    if (thread == NULL) {
-      _global_recorder = rc;
-    } else {
-      thread->set_recorder(rc);
-    }
-  }
-  return rc;
-}
-
-/*
- * get a per-thread recorder from pool, or create a new one if
- * there is not one available.
- */
-MemRecorder* MemTracker::get_new_or_pooled_instance() {
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-  if (cur_head == NULL) {
-    MemRecorder* rec = new (std::nothrow)MemRecorder();
-    if (rec == NULL || rec->out_of_memory()) {
-      shutdown(NMT_out_of_memory);
-      if (rec != NULL) {
-        delete rec;
-        rec = NULL;
-      }
-    }
-    return rec;
-  } else {
-    MemRecorder* next_head = cur_head->next();
-    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
-      (void*)cur_head)) {
-      return get_new_or_pooled_instance();
-    }
-    cur_head->set_next(NULL);
-    Atomic::dec(&_pooled_recorder_count);
-    cur_head->set_generation();
-    return cur_head;
-  }
-}
-
-/*
- * retrieve all recorders in pending queue, and empty the queue
- */
-MemRecorder* MemTracker::get_pending_recorders() {
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  MemRecorder* null_ptr = NULL;
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  }
-  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
-  return cur_head;
-}
-
-/*
- * release a recorder to recorder pool.
- */
-void MemTracker::release_thread_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-  // we don't want to pool too many recorders
-  rec->set_next(NULL);
-  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
-    delete rec;
-    return;
+  // walk completed
+  void completed() {
+    record_bucket_length(_current_bucket_length);
   }
-  rec->clear();
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-    rec->set_next(cur_head);
-  }
-  Atomic::inc(&_pooled_recorder_count);
-}
-
-// write a record to proper recorder. No lock can be taken from this method
-// down.
-void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
-    size_t size, jint seq, address pc, JavaThread* thread) {
-
-    MemRecorder* rc = get_thread_recorder(thread);
-    if (rc != NULL) {
-      rc->record(addr, flags, size, seq, pc);
+  void report_statistics(outputStream* out) {
+    int index;
+    out->print_cr("Malloc allocation site table:");
+    out->print_cr("\tTotal entries: %d", _total_entries);
+    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
+    out->print_cr(" ");
+    out->print_cr("Hash distribution:");
+    if (_used_buckets < MallocSiteTable::hash_buckets()) {
+      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
-}
-
-/**
- * enqueue a recorder to pending queue
- */
-void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-
-  // we are shutting down, so just delete it
-  if (shutdown_in_progress()) {
-    rec->set_next(NULL);
-    delete rec;
-    return;
-  }
-
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-    rec->set_next(cur_head);
-  }
-  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
-}
-
-/*
- * The method is called at global safepoint
- * during it synchronization process.
- * 1. enqueue all JavaThreads' per-thread recorders
- * 2. enqueue global recorder
- * 3. retrieve all pending recorders
- * 4.
reset global sequence number generator - * 5. call worker's sync - */ -#define MAX_SAFEPOINTS_TO_SKIP 128 -#define SAFE_SEQUENCE_THRESHOLD 30 -#define HIGH_GENERATION_THRESHOLD 60 -#define MAX_RECORDER_THREAD_RATIO 30 -#define MAX_RECORDER_PER_THREAD 100 - -void MemTracker::sync() { - assert(_tracking_level > NMT_off, "NMT is not enabled"); - assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); - - // Some GC tests hit large number of safepoints in short period of time - // without meaningful activities. We should prevent going to - // sync point in these cases, which can potentially exhaust generation buffer. - // Here is the factots to determine if we should go into sync point: - // 1. not to overflow sequence number - // 2. if we are in danger to overflow generation buffer - // 3. how many safepoints we already skipped sync point - if (_state == NMT_started) { - // worker thread is not ready, no one can manage generation - // buffer, so skip this safepoint - if (_worker_thread == NULL) return; - - if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) { - int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint; - int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS; - if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) { - _sync_point_skip_count ++; - return; + for (index = 0; index < report_threshold; index ++) { + if (_hash_distribution[index] != 0) { + if (index == 0) { + out->print_cr(" %d entry: %d", 1, _hash_distribution[0]); + } else if (index < 9) { // single digit + out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); + } else { + out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); + } } } - { - // This method is running at safepoint, with ThreadCritical lock, - // it should guarantee that NMT is fully sync-ed. - ThreadCritical tc; - - // We can NOT execute NMT sync-point if there are pending tracking ops. - if (_pending_op_count == 0) { - SequenceGenerator::reset(); - _sync_point_skip_count = 0; - - // walk all JavaThreads to collect recorders - SyncThreadRecorderClosure stc; - Threads::threads_do(&stc); - - _thread_count = stc.get_thread_count(); - MemRecorder* pending_recorders = get_pending_recorders(); - - if (_global_recorder != NULL) { - _global_recorder->set_next(pending_recorders); - pending_recorders = _global_recorder; - _global_recorder = NULL; - } - - // see if NMT has too many outstanding recorder instances, it usually - // means that worker thread is lagging behind in processing them. 
- if (!AutoShutdownNMT) { - _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); - } else { - // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM - if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) { - shutdown(NMT_out_of_memory); - } - } - - // check _worker_thread with lock to avoid racing condition - if (_worker_thread != NULL) { - _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); - } - assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); - } else { - _sync_point_skip_count ++; + if (_bucket_over_threshold > 0) { + out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold); + } + out->print_cr("most entries: %d", _longest_bucket_length); + out->print_cr(" "); + out->print_cr("Call stack depth distribution:"); + for (index = 0; index < NMT_TrackingStackDepth; index ++) { + if (_stack_depth_distribution[index] > 0) { + out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]); } } } - // now, it is the time to shut whole things off - if (_state == NMT_final_shutdown) { - // walk all JavaThreads to delete all recorders - SyncThreadRecorderClosure stc; - Threads::threads_do(&stc); - // delete global recorder - { - ThreadCritical tc; - if (_global_recorder != NULL) { - delete _global_recorder; - _global_recorder = NULL; - } + private: + void record_bucket_length(int length) { + _used_buckets ++; + if (length <= report_threshold) { + _hash_distribution[length - 1] ++; + } else { + _bucket_over_threshold ++; } - MemRecorder* pending_recorders = get_pending_recorders(); - if (pending_recorders != NULL) { - delete pending_recorders; - } - // try at a later sync point to ensure MemRecorder instance drops to zero to - // completely shutdown NMT - if (MemRecorder::_instance_count == 0) { - _state = NMT_shutdown; - _tracking_level = NMT_off; - } + _longest_bucket_length = MAX2(_longest_bucket_length, length); } +}; + + +void MemTracker::tuning_statistics(outputStream* out) { + // NMT statistics + StatisticsWalker walker; + MallocSiteTable::walk_malloc_site(&walker); + walker.completed(); + + out->print_cr("Native Memory Tracking Statistics:"); + out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets()); + out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth); + NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());) + out->print_cr(" "); + walker.report_statistics(out); } -/* - * Start worker thread. - */ -bool MemTracker::start_worker(MemSnapshot* snapshot) { - assert(_worker_thread == NULL && _snapshot != NULL, "Just Check"); - _worker_thread = new (std::nothrow) MemTrackWorker(snapshot); - if (_worker_thread == NULL) { - return false; - } else if (_worker_thread->has_error()) { - delete _worker_thread; - _worker_thread = NULL; - return false; - } - _worker_thread->start(); - return true; -} - -/* - * We need to collect a JavaThread's per-thread recorder - * before it exits. 
- */ -void MemTracker::thread_exiting(JavaThread* thread) { - if (is_on()) { - MemRecorder* rec = thread->get_recorder(); - if (rec != NULL) { - enqueue_pending_recorder(rec); - thread->set_recorder(NULL); - } - } -} - -// baseline current memory snapshot -bool MemTracker::baseline() { - MutexLocker lock(_query_lock); - MemSnapshot* snapshot = get_snapshot(); - if (snapshot != NULL) { - return _baseline.baseline(*snapshot, false); - } - return false; -} - -// print memory usage from current snapshot -bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { - MemBaseline baseline; - MutexLocker lock(_query_lock); - MemSnapshot* snapshot = get_snapshot(); - if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { - BaselineReporter reporter(out, unit); - reporter.report_baseline(baseline, summary_only); - return true; - } - return false; -} - -// Whitebox API for blocking until the current generation of NMT data has been merged -bool MemTracker::wbtest_wait_for_data_merge() { - // NMT can't be shutdown while we're holding _query_lock - MutexLocker lock(_query_lock); - assert(_worker_thread != NULL, "Invalid query"); - // the generation at query time, so NMT will spin till this generation is processed - unsigned long generation_at_query_time = SequenceGenerator::current_generation(); - unsigned long current_processing_generation = _processing_generation; - // if generation counter overflown - bool generation_overflown = (generation_at_query_time < current_processing_generation); - long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; - // spin - while (!shutdown_in_progress()) { - if (!generation_overflown) { - if (current_processing_generation > generation_at_query_time) { - return true; - } - } else { - assert(generations_to_wrap >= 0, "Sanity check"); - long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; - assert(current_generations_to_wrap >= 0, "Sanity check"); - // to overflow an unsigned long should take long time, so to_wrap check should be sufficient - if (current_generations_to_wrap > generations_to_wrap && - current_processing_generation > generation_at_query_time) { - return true; - } - } - - // if worker thread is idle, but generation is not advancing, that means - // there is not safepoint to let NMT advance generation, force one. 
- if (_worker_thread_idle) { - VM_ForceSafepoint vfs; - VMThread::execute(&vfs); - } - MemSnapshot* snapshot = get_snapshot(); - if (snapshot == NULL) { - return false; - } - snapshot->wait(1000); - current_processing_generation = _processing_generation; - } - // We end up here if NMT is shutting down before our data has been merged - return false; -} - -// compare memory usage between current snapshot and baseline -bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { - MutexLocker lock(_query_lock); - if (_baseline.baselined()) { - MemBaseline baseline; - MemSnapshot* snapshot = get_snapshot(); - if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { - BaselineReporter reporter(out, unit); - reporter.diff_baselines(baseline, _baseline, summary_only); - return true; - } - } - return false; -} - -#ifndef PRODUCT -void MemTracker::walk_stack(int toSkip, char* buf, int len) { - int cur_len = 0; - char tmp[1024]; - address pc; - - while (cur_len < len) { - pc = os::get_caller_pc(toSkip + 1); - if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) { - jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp); - cur_len = (int)strlen(buf); - } else { - buf[cur_len] = '\0'; - break; - } - toSkip ++; - } -} - -void MemTracker::print_tracker_stats(outputStream* st) { - st->print_cr("\nMemory Tracker Stats:"); - st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num()); - st->print_cr("\tthead count = %d", _thread_count); - st->print_cr("\tArena instance = %d", Arena::_instance_count); - st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count); - st->print_cr("\tqueued recorder count = %d", _pending_recorder_count); - st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count); - if (_worker_thread != NULL) { - st->print_cr("\tWorker thread:"); - st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count); - st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders()); - st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count); - } else { - st->print_cr("\tWorker thread is not started"); - } - st->print_cr(" "); - - if (_snapshot != NULL) { - _snapshot->print_snapshot_stats(st); - } else { - st->print_cr("No snapshot"); - } -} -#endif - - -// Tracker Implementation - -/* - * Create a tracker. - * This is a fairly complicated constructor, as it has to make two important decisions: - * 1) Does it need to take ThreadCritical lock to write tracking record - * 2) Does it need to pre-reserve a sequence number for the tracking record - * - * The rules to determine if ThreadCritical is needed: - * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM - * still in single thread mode. - * 2. For all threads other than JavaThread, ThreadCritical is needed - * to write to recorders to global recorder. - * 3. For JavaThreads that are no longer visible by safepoint, also - * need to take ThreadCritical and records are written to global - * recorders, since these threads are NOT walked by Threads.do_thread(). - * 4. JavaThreads that are running in safepoint-safe states do not stop - * for safepoints, ThreadCritical lock should be taken to write - * memory records. - * 5. JavaThreads that are running in VM state do not need any lock and - * records are written to per-thread recorders. - * 6. For a thread has yet to attach VM 'Thread', they need to take - * ThreadCritical to write to global recorder. 
- *
- * The memory operations that need pre-reserve sequence numbers:
- * The memory operations that "release" memory blocks and the
- * operations can fail, need to pre-reserve sequence number. They
- * are realloc, uncommit and release.
- *
- * The reason for pre-reserve sequence number, is to prevent race condition:
- *   Thread 1                          Thread 2
- *   <release memory address A>
- *                                     <obtain memory address A>
- *                                     <write allocation record>
- *   <write release record>
- * if Thread 2 happens to obtain the memory address Thread 1 just released,
- * then NMT can mistakenly report the memory is free.
- *
- * Noticeably, free() does not need pre-reserve sequence number, because the call
- * does not fail, so we can alway write "release" record before the memory is actaully
- * freed.
- *
- * For realloc, uncommit and release, following coding pattern should be used:
- *
- *   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
- *   ptr = ::realloc(...);
- *   if (ptr == NULL) {
- *     tkr.record(...)
- *   } else {
- *     tkr.discard();
- *   }
- *
- *   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
- *   if (uncommit(...)) {
- *     tkr.record(...);
- *   } else {
- *     tkr.discard();
- *   }
- *
- *   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
- *   if (release(...)) {
- *     tkr.record(...);
- *   } else {
- *     tkr.discard();
- *   }
- *
- * Since pre-reserved sequence number is only good for the generation that it is acquired,
- * when there is pending Tracker that reserved sequence number, NMT sync-point has
- * to be skipped to prevent from advancing generation. This is done by inc and dec
- * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped.
- * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads
- * that honor safepoints, safepoint can not occur during the memory operations, so the
- * pre-reserved sequence number won't cross the generation boundry.
- */ -MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) { - _op = NoOp; - _seq = 0; - if (MemTracker::is_on()) { - _java_thread = NULL; - _op = op; - - // figure out if ThreadCritical lock is needed to write this operation - // to MemTracker - if (MemTracker::is_single_threaded_bootstrap()) { - thr = NULL; - } else if (thr == NULL) { - // don't use Thread::current(), since it is possible that - // the calling thread has yet to attach to VM 'Thread', - // which will result assertion failure - thr = ThreadLocalStorage::thread(); - } - - if (thr != NULL) { - // Check NMT load - MemTracker::check_NMT_load(thr); - - if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) { - _java_thread = (JavaThread*)thr; - JavaThreadState state = _java_thread->thread_state(); - // JavaThreads that are safepoint safe, can run through safepoint, - // so ThreadCritical is needed to ensure no threads at safepoint create - // new records while the records are being gathered and the sequence number is changing - _need_thread_critical_lock = - SafepointSynchronize::safepoint_safe(_java_thread, state); - } else { - _need_thread_critical_lock = true; - } - } else { - _need_thread_critical_lock - = !MemTracker::is_single_threaded_bootstrap(); - } - - // see if we need to pre-reserve sequence number for this operation - if (_op == Realloc || _op == Uncommit || _op == Release) { - if (_need_thread_critical_lock) { - ThreadCritical tc; - MemTracker::inc_pending_op_count(); - _seq = SequenceGenerator::next(); - } else { - // for the threads that honor safepoints, no safepoint can occur - // during the lifespan of tracker, so we don't need to increase - // pending op count. - _seq = SequenceGenerator::next(); - } - } - } -} - -void MemTracker::Tracker::discard() { - if (MemTracker::is_on() && _seq != 0) { - if (_need_thread_critical_lock) { - ThreadCritical tc; - MemTracker::dec_pending_op_count(); - } - _seq = 0; - } -} - - -void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size, - MEMFLAGS flags, address pc) { - assert(old_addr != NULL && new_addr != NULL, "Sanity check"); - assert(_op == Realloc || _op == NoOp, "Wrong call"); - if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { - assert(_seq > 0, "Need pre-reserve sequence number"); - if (_need_thread_critical_lock) { - ThreadCritical tc; - // free old address, use pre-reserved sequence number - MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), - 0, _seq, pc, _java_thread); - MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), - size, SequenceGenerator::next(), pc, _java_thread); - // decrement MemTracker pending_op_count - MemTracker::dec_pending_op_count(); - } else { - // free old address, use pre-reserved sequence number - MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), - 0, _seq, pc, _java_thread); - MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), - size, SequenceGenerator::next(), pc, _java_thread); - } - _seq = 0; - } -} - -void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) { - // OOM already? 
- if (addr == NULL) return; - - if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { - bool pre_reserved_seq = (_seq != 0); - address pc = CALLER_CALLER_PC; - MEMFLAGS orig_flags = flags; - - // or the tagging flags - switch(_op) { - case Malloc: - flags |= MemPointerRecord::malloc_tag(); - break; - case Free: - flags = MemPointerRecord::free_tag(); - break; - case Realloc: - fatal("Use the other Tracker::record()"); - break; - case Reserve: - case ReserveAndCommit: - flags |= MemPointerRecord::virtual_memory_reserve_tag(); - break; - case Commit: - flags = MemPointerRecord::virtual_memory_commit_tag(); - break; - case Type: - flags |= MemPointerRecord::virtual_memory_type_tag(); - break; - case Uncommit: - assert(pre_reserved_seq, "Need pre-reserve sequence number"); - flags = MemPointerRecord::virtual_memory_uncommit_tag(); - break; - case Release: - assert(pre_reserved_seq, "Need pre-reserve sequence number"); - flags = MemPointerRecord::virtual_memory_release_tag(); - break; - case ArenaSize: - // a bit of hack here, add a small postive offset to arena - // address for its size record, so the size record is sorted - // right after arena record. - flags = MemPointerRecord::arena_size_tag(); - addr += sizeof(void*); - break; - case StackRelease: - flags = MemPointerRecord::virtual_memory_release_tag(); - break; - default: - ShouldNotReachHere(); - } - - // write memory tracking record - if (_need_thread_critical_lock) { - ThreadCritical tc; - if (_seq == 0) _seq = SequenceGenerator::next(); - MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); - if (_op == ReserveAndCommit) { - MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), - size, SequenceGenerator::next(), pc, _java_thread); - } - if (pre_reserved_seq) MemTracker::dec_pending_op_count(); - } else { - if (_seq == 0) _seq = SequenceGenerator::next(); - MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); - if (_op == ReserveAndCommit) { - MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), - size, SequenceGenerator::next(), pc, _java_thread); - } - } - _seq = 0; - } -} - --- ./hotspot/src/share/vm/services/memTracker.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memTracker.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,574 +25,297 @@ #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP #define SHARE_VM_SERVICES_MEM_TRACKER_HPP -#include "utilities/macros.hpp" +#include "services/nmtCommon.hpp" +#include "utilities/nativeCallStack.hpp" + #if !INCLUDE_NMT -#include "utilities/ostream.hpp" +#define CURRENT_PC NativeCallStack::EMPTY_STACK +#define CALLER_PC NativeCallStack::EMPTY_STACK -class BaselineOutputer : public StackObj { - -}; - -class BaselineTTYOutputer : public BaselineOutputer { - public: - BaselineTTYOutputer(outputStream* st) { } +class Tracker : public StackObj { + public: + Tracker() { } + void record(address addr, size_t size) { } }; class MemTracker : AllStatic { - public: - enum ShutdownReason { - NMT_shutdown_none, // no shutdown requested - NMT_shutdown_user, // user requested shutdown - NMT_normal, // normal shutdown, process exit - NMT_out_of_memory, // shutdown due to out of memory - NMT_initialization, // shutdown due to initialization failure - NMT_use_malloc_only, // can not combine NMT with UseMallocOnly flag - NMT_error_reporting, // shutdown by vmError::report_and_die() - NMT_out_of_generation, // running out of generation queue - NMT_sequence_overflow // overflow the sequence number - }; + public: + static inline NMT_TrackingLevel tracking_level() { return NMT_off; } + static inline void shutdown() { } + static inline void init() { } + static bool check_launcher_nmt_support(const char* value) { return true; } + static bool verify_nmt_option() { return true; } - class Tracker { - public: - void discard() { } + static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag, + const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; } + static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; } + static inline size_t malloc_header_size(void* memblock) { return 0; } + static inline void* malloc_base(void* memblock) { return memblock; } + static inline void* record_free(void* memblock) { return memblock; } - void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { } - void record(address old_addr, address new_addr, size_t size, - MEMFLAGS flags, address pc = NULL) { } - }; + static inline void record_new_arena(MEMFLAGS flag) { } + static inline void record_arena_free(MEMFLAGS flag) { } + static inline void record_arena_size_change(int diff, MEMFLAGS flag) { } + static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, + MEMFLAGS flag = mtNone) { } + static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, + const NativeCallStack& stack, MEMFLAGS flag = mtNone) { } + static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { } + static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); } + static inline Tracker get_virtual_memory_release_tracker() { } + static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { } + static inline void record_thread_stack(void* addr, size_t size) { } + static inline void release_thread_stack(void* addr, size_t size) { } - private: - static Tracker _tkr; - - - public: - static inline void init_tracking_options(const char* option_line) { } - static inline bool is_on() { return false; } - static const char* reason() { return "Native memory tracking is not implemented"; } - static inline bool can_walk_stack() { return false; } - - static inline void 
bootstrap_single_thread() { } - static inline void bootstrap_multi_thread() { } - static inline void start() { } - - static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, - address pc = 0, Thread* thread = NULL) { } - static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { } - static inline void record_arena_size(address addr, size_t size) { } - static inline void record_virtual_memory_reserve(address addr, size_t size, - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } - static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size, - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } - static inline void record_virtual_memory_commit(address addr, size_t size, - address pc = 0, Thread* thread = NULL) { } - static inline void record_virtual_memory_release(address addr, size_t size, - Thread* thread = NULL) { } - static inline void record_virtual_memory_type(address base, MEMFLAGS flags, - Thread* thread = NULL) { } - static inline Tracker get_realloc_tracker() { return _tkr; } - static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; } - static inline Tracker get_virtual_memory_release_tracker() { return _tkr; } - static inline bool baseline() { return false; } - static inline bool has_baseline() { return false; } - - static inline void set_autoShutdown(bool value) { } - static void shutdown(ShutdownReason reason) { } - static inline bool shutdown_in_progress() { return false; } - static bool print_memory_usage(BaselineOutputer& out, size_t unit, - bool summary_only = true) { return false; } - static bool compare_memory_usage(BaselineOutputer& out, size_t unit, - bool summary_only = true) { return false; } - - static bool wbtest_wait_for_data_merge() { return false; } - - static inline void sync() { } - static inline void thread_exiting(JavaThread* thread) { } + static void final_report(outputStream*) { } + static void error_report(outputStream*) { } }; +#else -#else // !INCLUDE_NMT +#include "runtime/atomic.hpp" +#include "runtime/threadCritical.hpp" +#include "services/mallocTracker.hpp" +#include "services/virtualMemoryTracker.hpp" -#include "memory/allocation.hpp" -#include "runtime/globals.hpp" -#include "runtime/mutex.hpp" -#include "runtime/os.hpp" -#include "runtime/thread.hpp" -#include "services/memPtr.hpp" -#include "services/memRecorder.hpp" -#include "services/memSnapshot.hpp" -#include "services/memTrackWorker.hpp" +extern volatile bool NMT_stack_walkable; -extern bool NMT_track_callsite; +#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ + NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK) +#define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ + NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK) -#ifndef MAX_UNSIGNED_LONG -#define MAX_UNSIGNED_LONG (unsigned long)(-1) -#endif +class MemBaseline; +class Mutex; -#ifdef ASSERT - #define DEBUG_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) -#else - #define DEBUG_CALLER_PC 0 -#endif - -// The thread closure walks threads to collect per-thread -// memory recorders at NMT sync point -class SyncThreadRecorderClosure : public ThreadClosure { - private: - int _thread_count; - +// Tracker is used for guarding 'release' semantics of virtual memory operation, to avoid +// the other thread obtains and records the same region that is just 'released' by current +// thread but before it can record the operation. 
+class Tracker : public StackObj { public: - SyncThreadRecorderClosure() { - _thread_count =0; - } - - void do_thread(Thread* thread); - int get_thread_count() const { - return _thread_count; - } -}; - -class BaselineOutputer; -class MemSnapshot; -class MemTrackWorker; -class Thread; -/* - * MemTracker is the 'gate' class to native memory tracking runtime. - */ -class MemTracker : AllStatic { - friend class GenerationData; - friend class MemTrackWorker; - friend class MemSnapshot; - friend class SyncThreadRecorderClosure; - - // NMT state - enum NMTStates { - NMT_uninited, // not yet initialized - NMT_bootstrapping_single_thread, // bootstrapping, VM is in single thread mode - NMT_bootstrapping_multi_thread, // bootstrapping, VM is about to enter multi-thread mode - NMT_started, // NMT fully started - NMT_shutdown_pending, // shutdown pending - NMT_final_shutdown, // in final phase of shutdown - NMT_shutdown // shutdown + enum TrackerType { + uncommit, + release }; public: - class Tracker : public StackObj { - friend class MemTracker; - public: - enum MemoryOperation { - NoOp, // no op - Malloc, // malloc - Realloc, // realloc - Free, // free - Reserve, // virtual memory reserve - Commit, // virtual memory commit - ReserveAndCommit, // virtual memory reserve and commit - StackAlloc = ReserveAndCommit, // allocate thread stack - Type, // assign virtual memory type - Uncommit, // virtual memory uncommit - Release, // virtual memory release - ArenaSize, // set arena size - StackRelease // release thread stack - }; + Tracker(enum TrackerType type) : _type(type) { } + void record(address addr, size_t size); + private: + enum TrackerType _type; + // Virtual memory tracking data structures are protected by ThreadCritical lock. + ThreadCritical _tc; +}; - - protected: - Tracker(MemoryOperation op, Thread* thr = NULL); - - public: - void discard(); - - void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL); - void record(address old_addr, address new_addr, size_t size, - MEMFLAGS flags, address pc = NULL); - - private: - bool _need_thread_critical_lock; - JavaThread* _java_thread; - MemoryOperation _op; // memory operation - jint _seq; // reserved sequence number - }; - - +class MemTracker : AllStatic { public: - // native memory tracking level - enum NMTLevel { - NMT_off, // native memory tracking is off - NMT_summary, // don't track callsite - NMT_detail // track callsite also - }; - - enum ShutdownReason { - NMT_shutdown_none, // no shutdown requested - NMT_shutdown_user, // user requested shutdown - NMT_normal, // normal shutdown, process exit - NMT_out_of_memory, // shutdown due to out of memory - NMT_initialization, // shutdown due to initialization failure - NMT_use_malloc_only, // can not combine NMT with UseMallocOnly flag - NMT_error_reporting, // shutdown by vmError::report_and_die() - NMT_out_of_generation, // running out of generation queue - NMT_sequence_overflow // overflow the sequence number - }; - - public: - // initialize NMT tracking level from command line options, called - // from VM command line parsing code - static void init_tracking_options(const char* option_line); - - // if NMT is enabled to record memory activities - static inline bool is_on() { - return (_tracking_level >= NMT_summary && - _state >= NMT_bootstrapping_single_thread); - } - - static inline enum NMTLevel tracking_level() { + static inline NMT_TrackingLevel tracking_level() { + if (_tracking_level == NMT_unknown) { + // No fencing is needed here, since JVM is in 
single-threaded
+      // mode.
+      _tracking_level = init_tracking_level();
+      _cmdline_tracking_level = _tracking_level;
+    }
     return _tracking_level;
   }

-  // user readable reason for shutting down NMT
-  static const char* reason() {
-    switch(_reason) {
-      case NMT_shutdown_none:
-        return "Native memory tracking is not enabled";
-      case NMT_shutdown_user:
-        return "Native memory tracking has been shutdown by user";
-      case NMT_normal:
-        return "Native memory tracking has been shutdown due to process exiting";
-      case NMT_out_of_memory:
-        return "Native memory tracking has been shutdown due to out of native memory";
-      case NMT_initialization:
-        return "Native memory tracking failed to initialize";
-      case NMT_error_reporting:
-        return "Native memory tracking has been shutdown due to error reporting";
-      case NMT_out_of_generation:
-        return "Native memory tracking has been shutdown due to running out of generation buffer";
-      case NMT_sequence_overflow:
-        return "Native memory tracking has been shutdown due to overflow the sequence number";
-      case NMT_use_malloc_only:
-        return "Native memory tracking is not supported when UseMallocOnly is on";
-      default:
-        ShouldNotReachHere();
-        return NULL;
+  // Late initialization, for anything that can not be
+  // done in init_tracking_level(), which must not malloc
+  // any memory.
+  static void init();
+
+  // Shutdown native memory tracking.
+  static void shutdown();
+
+  // Verify the native memory tracking command line option.
+  // This check allows the JVM to detect whether a compatible
+  // launcher is used.
+  // If an incompatible launcher is used, NMT may not be
+  // able to start, even if it is enabled by command line option.
+  // A warning message should be given if it is encountered.
+  static bool check_launcher_nmt_support(const char* value);
+
+  // This method checks the native memory tracking environment
+  // variable value passed by the launcher.
+  // The launcher is only obliged to pass the native memory tracking
+  // option value along, not to validate it, and it may drop the
+  // native memory tracking option from the command line once it has
+  // set up the environment variable, so NMT has to catch bad values here.
+  static bool verify_nmt_option();
+
+  // Transition the tracking level to the specified level.
+  static bool transition_to(NMT_TrackingLevel level);
+
+  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+    const NativeCallStack& stack, NMT_TrackingLevel level) {
+    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
+  }
+
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
+    return MallocTracker::malloc_header_size(level);
+  }
+
+  static size_t malloc_header_size(void* memblock) {
+    if (tracking_level() != NMT_off) {
+      return MallocTracker::get_header_size(memblock);
+    }
+    return 0;
+  }
+
+  // Returns the malloc base address, which is the starting address
+  // of the malloc tracking header if tracking is enabled.
+  // Otherwise, it returns the address unchanged.
+  static void* malloc_base(void* memblock);
+
+  // Record a malloc free and return the malloc base address.
+  static inline void* record_free(void* memblock) {
+    return MallocTracker::record_free(memblock);
+  }
+
+
+  // Record creation of an arena.
+  static inline void record_new_arena(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_new_arena(flag);
+  }
+
+  // Record destruction of an arena.
+  static inline void record_arena_free(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_free(flag);
+  }
+
+  // Record an arena size change. Arena size is the size of all arena
+  // chunks that back the arena.
+  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_size_change(diff, flag);
+  }
+
+  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      // Recheck to avoid a potential race during NMT shutdown.
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
    }
  }
-  // test if we can walk native stack
-  static bool can_walk_stack() {
-    // native stack is not walkable during bootstrapping on sparc
-#if defined(SPARC)
-    return (_state == NMT_started);
-#else
-    return (_state >= NMT_bootstrapping_single_thread && _state <= NMT_started);
-#endif
-  }
-
-  // if native memory tracking tracks callsite
-  static inline bool track_callsite() { return _tracking_level == NMT_detail; }
-
-  // NMT automatically shuts itself down under extreme situation by default.
-  // When the value is set to false, NMT will try its best to stay alive,
-  // even it has to slow down VM.
-  static inline void set_autoShutdown(bool value) {
-    AutoShutdownNMT = value;
-    if (AutoShutdownNMT && _slowdown_calling_thread) {
-      _slowdown_calling_thread = false;
+  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
+    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size,
+        stack, flag, true);
    }
  }
-
-  // shutdown native memory tracking capability. Native memory tracking
-  // can be shutdown by VM when it encounters low memory scenarios.
-  // Memory tracker should gracefully shutdown itself, and preserve the
-  // latest memory statistics for post morten diagnosis.
-  static void shutdown(ShutdownReason reason);
-
-  // if there is shutdown requested
-  static inline bool shutdown_in_progress() {
-    return (_state >= NMT_shutdown_pending);
-  }
-
-  // bootstrap native memory tracking, so it can start to collect raw data
-  // before worker thread can start
-
-  // the first phase of bootstrapping, when VM still in single-threaded mode
-  static void bootstrap_single_thread();
-  // the second phase of bootstrapping, VM is about or already in multi-threaded mode
-  static void bootstrap_multi_thread();
-
-
-  // start() has to be called when VM still in single thread mode, but after
-  // command line option parsing is done.
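
Taken together, malloc_header_size(), record_malloc() and record_free() are meant to bracket an allocator. A simplified sketch of the expected call pattern (the wrapper functions here are hypothetical; the real os::malloc path carries additional error handling not shown in this patch):

  void* nmt_aware_malloc(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
    // Sample the level once so the header size and the record stay consistent.
    NMT_TrackingLevel level = MemTracker::tracking_level();
    size_t header = MemTracker::malloc_header_size(level);
    void* base = ::malloc(size + header);
    if (base == NULL) return NULL;
    // Fills in the tracking header (if any) and returns the user pointer
    // just past it; with tracking off it returns base unchanged.
    return MemTracker::record_malloc(base, size, flags, stack, level);
  }

  void nmt_aware_free(void* memblock) {
    if (memblock == NULL) return;
    // record_free() unwinds the accounting and returns the raw base
    // address, which is what must be handed back to ::free().
    ::free(MemTracker::record_free(memblock));
  }

A call site would typically pass CALLER_PC (defined earlier in this header) as the stack argument, so that detail mode attributes the allocation to the caller.
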
- static void start(); - - // record a 'malloc' call - static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, - address pc = 0, Thread* thread = NULL) { - Tracker tkr(Tracker::Malloc, thread); - tkr.record(addr, size, flags, pc); - } - // record a 'free' call - static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { - Tracker tkr(Tracker::Free, thread); - tkr.record(addr, 0, flags, DEBUG_CALLER_PC); - } - - static inline void record_arena_size(address addr, size_t size) { - Tracker tkr(Tracker::ArenaSize); - tkr.record(addr, size); - } - - // record a virtual memory 'reserve' call - static inline void record_virtual_memory_reserve(address addr, size_t size, - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { - assert(size > 0, "Sanity check"); - Tracker tkr(Tracker::Reserve, thread); - tkr.record(addr, size, flags, pc); - } - - static inline void record_thread_stack(address addr, size_t size, Thread* thr, - address pc = 0) { - Tracker tkr(Tracker::StackAlloc, thr); - tkr.record(addr, size, mtThreadStack, pc); - } - - static inline void release_thread_stack(address addr, size_t size, Thread* thr) { - Tracker tkr(Tracker::StackRelease, thr); - tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC); - } - - // record a virtual memory 'commit' call - static inline void record_virtual_memory_commit(address addr, size_t size, - address pc, Thread* thread = NULL) { - Tracker tkr(Tracker::Commit, thread); - tkr.record(addr, size, mtNone, pc); - } - - static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size, - MEMFLAGS flags, address pc, Thread* thread = NULL) { - Tracker tkr(Tracker::ReserveAndCommit, thread); - tkr.record(addr, size, flags, pc); - } - - static inline void record_virtual_memory_release(address addr, size_t size, - Thread* thread = NULL) { - if (is_on()) { - Tracker tkr(Tracker::Release, thread); - tkr.record(addr, size); + static inline void record_virtual_memory_commit(void* addr, size_t size, + const NativeCallStack& stack) { + if (tracking_level() < NMT_summary) return; + if (addr != NULL) { + ThreadCritical tc; + if (tracking_level() < NMT_summary) return; + VirtualMemoryTracker::add_committed_region((address)addr, size, stack); } } - // record memory type on virtual memory base address - static inline void record_virtual_memory_type(address base, MEMFLAGS flags, - Thread* thread = NULL) { - Tracker tkr(Tracker::Type); - tkr.record(base, 0, flags); - } - - // Get memory trackers for memory operations that can result race conditions. - // The memory tracker has to be obtained before realloc, virtual memory uncommit - // and virtual memory release, and call tracker.record() method if operation - // succeeded, or tracker.discard() to abort the tracking. 
- static inline Tracker get_realloc_tracker() { - return Tracker(Tracker::Realloc); - } - static inline Tracker get_virtual_memory_uncommit_tracker() { - return Tracker(Tracker::Uncommit); + assert(tracking_level() >= NMT_summary, "Check by caller"); + return Tracker(Tracker::uncommit); } static inline Tracker get_virtual_memory_release_tracker() { - return Tracker(Tracker::Release); + assert(tracking_level() >= NMT_summary, "Check by caller"); + return Tracker(Tracker::release); } - - // create memory baseline of current memory snapshot - static bool baseline(); - // is there a memory baseline - static bool has_baseline() { - return _baseline.baselined(); - } - - // print memory usage from current snapshot - static bool print_memory_usage(BaselineOutputer& out, size_t unit, - bool summary_only = true); - // compare memory usage between current snapshot and baseline - static bool compare_memory_usage(BaselineOutputer& out, size_t unit, - bool summary_only = true); - - // the version for whitebox testing support, it ensures that all memory - // activities before this method call, are reflected in the snapshot - // database. - static bool wbtest_wait_for_data_merge(); - - // sync is called within global safepoint to synchronize nmt data - static void sync(); - - // called when a thread is about to exit - static void thread_exiting(JavaThread* thread); - - // retrieve global snapshot - static MemSnapshot* get_snapshot() { - if (shutdown_in_progress()) { - return NULL; - } - return _snapshot; - } - - // print tracker stats - NOT_PRODUCT(static void print_tracker_stats(outputStream* st);) - NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);) - - private: - // start native memory tracking worker thread - static bool start_worker(MemSnapshot* snapshot); - - // called by worker thread to complete shutdown process - static void final_shutdown(); - - protected: - // retrieve per-thread recorder of the specified thread. - // if the recorder is full, it will be enqueued to overflow - // queue, a new recorder is acquired from recorder pool or a - // new instance is created. - // when thread == NULL, it means global recorder - static MemRecorder* get_thread_recorder(JavaThread* thread); - - // per-thread recorder pool - static void release_thread_recorder(MemRecorder* rec); - static void delete_all_pooled_recorders(); - - // pending recorder queue. Recorders are queued to pending queue - // when they are overflowed or collected at nmt sync point. - static void enqueue_pending_recorder(MemRecorder* rec); - static MemRecorder* get_pending_recorders(); - static void delete_all_pending_recorders(); - - // write a memory tracking record in recorder - static void write_tracking_record(address addr, MEMFLAGS type, - size_t size, jint seq, address pc, JavaThread* thread); - - static bool is_single_threaded_bootstrap() { - return _state == NMT_bootstrapping_single_thread; - } - - static void check_NMT_load(Thread* thr) { - assert(thr != NULL, "Sanity check"); - if (_slowdown_calling_thread && thr != _worker_thread) { -#ifdef _WINDOWS - // On Windows, os::NakedYield() does not work as well - // as os::yield_all() - os::yield_all(); -#else - // On Solaris, os::yield_all() depends on os::sleep() - // which requires JavaTherad in _thread_in_vm state. - // Transits thread to _thread_in_vm state can be dangerous - // if caller holds lock, as it may deadlock with Threads_lock. - // So use NaKedYield instead. - // - // Linux and BSD, NakedYield() and yield_all() implementations - // are the same. 
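
The two accessors above assert that the caller has already checked the tracking level; a usage sketch of the resulting two-phase protocol for a virtual memory release (pd_release_memory is an assumed platform hook, named here only for illustration):

  bool release_memory_tracked(char* addr, size_t bytes) {
    bool result;
    if (MemTracker::tracking_level() >= NMT_summary) {
      // Constructing the Tracker holds ThreadCritical for its whole lifetime,
      // so no other thread can re-reserve and record this range in between.
      Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
      result = pd_release_memory(addr, bytes);
      if (result) {
        tkr.record((address)addr, bytes);
      }
    } else {
      result = pd_release_memory(addr, bytes);
    }
    return result;
  }

Unlike the old Tracker, there is no discard(): if the operation fails, record() is simply never called, and the lock is dropped when the Tracker goes out of scope.
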
- os::NakedYield(); -#endif + static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { + if (tracking_level() < NMT_summary) return; + if (addr != NULL) { + ThreadCritical tc; + if (tracking_level() < NMT_summary) return; + VirtualMemoryTracker::set_reserved_region_type((address)addr, flag); } } - static void inc_pending_op_count() { - Atomic::inc(&_pending_op_count); + static inline void record_thread_stack(void* addr, size_t size) { + if (tracking_level() < NMT_summary) return; + if (addr != NULL) { + // uses thread stack malloc slot for book keeping number of threads + MallocMemorySummary::record_malloc(0, mtThreadStack); + record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); + } } - static void dec_pending_op_count() { - Atomic::dec(&_pending_op_count); - assert(_pending_op_count >= 0, "Sanity check"); + static inline void release_thread_stack(void* addr, size_t size) { + if (tracking_level() < NMT_summary) return; + if (addr != NULL) { + // uses thread stack malloc slot for book keeping number of threads + MallocMemorySummary::record_free(0, mtThreadStack); + ThreadCritical tc; + if (tracking_level() < NMT_summary) return; + VirtualMemoryTracker::remove_released_region((address)addr, size); + } } + // Query lock is used to synchronize the access to tracking data. + // So far, it is only used by JCmd query, but it may be used by + // other tools. + static inline Mutex* query_lock() { return _query_lock; } + + // Make a final report or report for hs_err file. + static void error_report(outputStream* output) { + if (tracking_level() >= NMT_summary) { + report(true, output); // just print summary for error case. + } + } + + static void final_report(outputStream* output) { + NMT_TrackingLevel level = tracking_level(); + if (level >= NMT_summary) { + report(level == NMT_summary, output); + } + } + + + // Stored baseline + static inline MemBaseline& get_baseline() { + return _baseline; + } + + static NMT_TrackingLevel cmdline_tracking_level() { + return _cmdline_tracking_level; + } + + static void tuning_statistics(outputStream* out); private: - // retrieve a pooled memory record or create new one if there is not - // one available - static MemRecorder* get_new_or_pooled_instance(); - static void create_memory_record(address addr, MEMFLAGS type, - size_t size, address pc, Thread* thread); - static void create_record_in_recorder(address addr, MEMFLAGS type, - size_t size, address pc, JavaThread* thread); - - static void set_current_processing_generation(unsigned long generation) { - _worker_thread_idle = false; - _processing_generation = generation; - } - - static void report_worker_idle() { - _worker_thread_idle = true; - } + static NMT_TrackingLevel init_tracking_level(); + static void report(bool summary_only, outputStream* output); private: - // global memory snapshot - static MemSnapshot* _snapshot; - - // a memory baseline of snapshot + // Tracking level + static volatile NMT_TrackingLevel _tracking_level; + // If NMT option value passed by launcher through environment + // variable is valid + static bool _is_nmt_env_valid; + // command line tracking level + static NMT_TrackingLevel _cmdline_tracking_level; + // Stored baseline static MemBaseline _baseline; - - // query lock + // Query lock static Mutex* _query_lock; - - // a thread can start to allocate memory before it is attached - // to VM 'Thread', those memory activities are recorded here. - // ThreadCritical is required to guard this global recorder. 
- static MemRecorder* volatile _global_recorder; - - // main thread id - debug_only(static intx _main_thread_tid;) - - // pending recorders to be merged - static MemRecorder* volatile _merge_pending_queue; - - NOT_PRODUCT(static volatile jint _pending_recorder_count;) - - // pooled memory recorders - static MemRecorder* volatile _pooled_recorders; - - // memory recorder pool management, uses following - // counter to determine if a released memory recorder - // should be pooled - - // latest thread count - static int _thread_count; - // pooled recorder count - static volatile jint _pooled_recorder_count; - - - // worker thread to merge pending recorders into snapshot - static MemTrackWorker* _worker_thread; - - // how many safepoints we skipped without entering sync point - static int _sync_point_skip_count; - - // if the tracker is properly intialized - static bool _is_tracker_ready; - // tracking level (off, summary and detail) - static enum NMTLevel _tracking_level; - - // current nmt state - static volatile enum NMTStates _state; - // the reason for shutting down nmt - static enum ShutdownReason _reason; - // the generation that NMT is processing - static volatile unsigned long _processing_generation; - // although NMT is still procesing current generation, but - // there is not more recorder to process, set idle state - static volatile bool _worker_thread_idle; - - // if NMT should slow down calling thread to allow - // worker thread to catch up - static volatile bool _slowdown_calling_thread; - - // pending memory op count. - // Certain memory ops need to pre-reserve sequence number - // before memory operation can happen to avoid race condition. - // See MemTracker::Tracker for detail - static volatile jint _pending_op_count; }; -#endif // !INCLUDE_NMT +#endif // INCLUDE_NMT #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP + --- ./hotspot/src/share/vm/services/memoryManager.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memoryManager.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/orderAccess.inline.hpp" #include "services/lowMemoryDetector.hpp" #include "services/management.hpp" #include "services/memoryManager.hpp" --- ./hotspot/src/share/vm/services/memoryPool.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/memoryPool.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/orderAccess.inline.hpp" #include "services/lowMemoryDetector.hpp" #include "services/management.hpp" #include "services/memoryManager.hpp" --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/nmtCommon.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "services/nmtCommon.hpp" + +const char* NMTUtil::_memory_type_names[] = { + "Java Heap", + "Class", + "Thread", + "Thread Stack", + "Code", + "GC", + "Compiler", + "Internal", + "Other", + "Symbol", + "Native Memory Tracking", + "Shared class space", + "Arena Chunk", + "Test", + "Tracing", + "Unknown" +}; + + +const char* NMTUtil::scale_name(size_t scale) { + switch(scale) { + case K: return "KB"; + case M: return "MB"; + case G: return "GB"; + } + ShouldNotReachHere(); + return NULL; +} + +size_t NMTUtil::scale_from_name(const char* scale) { + assert(scale != NULL, "Null pointer check"); + if (strncmp(scale, "KB", 2) == 0 || + strncmp(scale, "kb", 2) == 0) { + return K; + } else if (strncmp(scale, "MB", 2) == 0 || + strncmp(scale, "mb", 2) == 0) { + return M; + } else if (strncmp(scale, "GB", 2) == 0 || + strncmp(scale, "gb", 2) == 0) { + return G; + } else { + return 0; // Invalid value + } + return K; +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/nmtCommon.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP +#define SHARE_VM_SERVICES_NMT_COMMON_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type)) + +// Data type for memory counters +#ifdef _LP64 + typedef jlong MemoryCounterType; +#else + typedef jint MemoryCounterType; +#endif + +// Native memory tracking level +enum NMT_TrackingLevel { + NMT_unknown = 0xFF, + NMT_off = 0x00, + NMT_minimal = 0x01, + NMT_summary = 0x02, + NMT_detail = 0x03 +}; + +// Number of stack frames to capture. This is a +// build time decision. 
+const int NMT_TrackingStackDepth = 4; + +// A few common utilities for native memory tracking +class NMTUtil : AllStatic { + public: + // Map memory type to index + static inline int flag_to_index(MEMFLAGS flag) { + return (flag & 0xff); + } + + // Map memory type to human readable name + static const char* flag_to_name(MEMFLAGS flag) { + return _memory_type_names[flag_to_index(flag)]; + } + + // Map an index to memory type + static MEMFLAGS index_to_flag(int index) { + return (MEMFLAGS)index; + } + + // Memory size scale + static const char* scale_name(size_t scale); + static size_t scale_from_name(const char* scale); + + // Translate memory size in specified scale + static size_t amount_in_scale(size_t amount, size_t scale) { + return (amount + scale / 2) / scale; + } + private: + static const char* _memory_type_names[mt_number_of_types]; +}; + + +#endif --- ./hotspot/src/share/vm/services/nmtDCmd.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/nmtDCmd.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -22,6 +22,8 @@ * */ #include "precompiled.hpp" + +#include "runtime/mutexLocker.hpp" #include "services/nmtDCmd.hpp" #include "services/memReporter.hpp" #include "services/memTracker.hpp" @@ -49,13 +51,8 @@ _shutdown("shutdown", "request runtime to shutdown itself and free the " \ "memory used by runtime.", "BOOLEAN", false, "false"), - _auto_shutdown("autoShutdown", "automatically shutdown itself under " \ - "stress situation", - "BOOLEAN", true, "true"), -#ifndef PRODUCT - _debug("debug", "print tracker statistics. Debug only, not thread safe", \ + _statistics("statistics", "print tracker statistics for tuning purpose.", \ "BOOLEAN", false, "false"), -#endif _scale("scale", "Memory usage in which scale, KB, MB or GB", "STRING", false, "KB") { _dcmdparser.add_dcmd_option(&_summary); @@ -64,25 +61,30 @@ _dcmdparser.add_dcmd_option(&_summary_diff); _dcmdparser.add_dcmd_option(&_detail_diff); _dcmdparser.add_dcmd_option(&_shutdown); - _dcmdparser.add_dcmd_option(&_auto_shutdown); -#ifndef PRODUCT - _dcmdparser.add_dcmd_option(&_debug); -#endif + _dcmdparser.add_dcmd_option(&_statistics); _dcmdparser.add_dcmd_option(&_scale); } + +size_t NMTDCmd::get_scale(const char* scale) const { + if (scale == NULL) return 0; + return NMTUtil::scale_from_name(scale); +} + void NMTDCmd::execute(DCmdSource source, TRAPS) { + // Check NMT state + // native memory tracking has to be on + if (MemTracker::tracking_level() == NMT_off) { + output()->print_cr("Native memory tracking is not enabled"); + return; + } else if (MemTracker::tracking_level() == NMT_minimal) { + output()->print_cr("Native memory tracking has been shutdown"); + return; + } + const char* scale_value = _scale.value(); - size_t scale_unit; - if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) { - scale_unit = K; - } else if (strcmp(scale_value, "MB") == 0 || - strcmp(scale_value, "mb") == 0) { - scale_unit = M; - } else if (strcmp(scale_value, "GB") == 0 || - strcmp(scale_value, "gb") == 0) { - scale_unit = G; - } else { + size_t scale_unit = get_scale(scale_value); + if (scale_unit == 0) { output()->print_cr("Incorrect scale value: %s", scale_value); return; } @@ -94,19 +96,11 @@ if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; } if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; } if (_shutdown.is_set() && _shutdown.value()) { ++nopt; } - if (_auto_shutdown.is_set()) { ++nopt; } - -#ifndef PRODUCT - if (_debug.is_set() && _debug.value()) { ++nopt; } -#endif + if (_statistics.is_set() && 
_statistics.value()) { ++nopt; } if (nopt > 1) { output()->print_cr("At most one of the following option can be specified: " \ - "summary, detail, baseline, summary.diff, detail.diff, shutdown" -#ifndef PRODUCT - ", debug" -#endif - ); + "summary, detail, baseline, summary.diff, detail.diff, shutdown"); return; } else if (nopt == 0) { if (_summary.is_set()) { @@ -117,53 +111,47 @@ } } -#ifndef PRODUCT - if (_debug.value()) { - output()->print_cr("debug command is NOT thread-safe, may cause crash"); - MemTracker::print_tracker_stats(output()); + // Serialize NMT query + MutexLocker locker(MemTracker::query_lock()); + + if (_summary.value()) { + report(true, scale_unit); + } else if (_detail.value()) { + if (!check_detail_tracking_level(output())) { return; } -#endif - - // native memory tracking has to be on - if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) { - // if it is not on, what's the reason? - output()->print_cr("%s", MemTracker::reason()); + report(false, scale_unit); + } else if (_baseline.value()) { + MemBaseline& baseline = MemTracker::get_baseline(); + if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) { + output()->print_cr("Baseline failed"); + } else { + output()->print_cr("Baseline succeeded"); + } + } else if (_summary_diff.value()) { + MemBaseline& baseline = MemTracker::get_baseline(); + if (baseline.baseline_type() >= MemBaseline::Summary_baselined) { + report_diff(true, scale_unit); + } else { + output()->print_cr("No baseline for comparison"); + } + } else if (_detail_diff.value()) { + if (!check_detail_tracking_level(output())) { return; } - - if (_summary.value()) { - BaselineTTYOutputer outputer(output()); - MemTracker::print_memory_usage(outputer, scale_unit, true); - } else if (_detail.value()) { - BaselineTTYOutputer outputer(output()); - MemTracker::print_memory_usage(outputer, scale_unit, false); - } else if (_baseline.value()) { - if (MemTracker::baseline()) { - output()->print_cr("Successfully baselined."); + MemBaseline& baseline = MemTracker::get_baseline(); + if (baseline.baseline_type() == MemBaseline::Detail_baselined) { + report_diff(false, scale_unit); } else { - output()->print_cr("Baseline failed."); - } - } else if (_summary_diff.value()) { - if (MemTracker::has_baseline()) { - BaselineTTYOutputer outputer(output()); - MemTracker::compare_memory_usage(outputer, scale_unit, true); - } else { - output()->print_cr("No baseline to compare, run 'baseline' command first"); - } - } else if (_detail_diff.value()) { - if (MemTracker::has_baseline()) { - BaselineTTYOutputer outputer(output()); - MemTracker::compare_memory_usage(outputer, scale_unit, false); - } else { - output()->print_cr("No baseline to compare to, run 'baseline' command first"); + output()->print_cr("No detail baseline for comparison"); } } else if (_shutdown.value()) { - MemTracker::shutdown(MemTracker::NMT_shutdown_user); - output()->print_cr("Shutdown is in progress, it will take a few moments to " \ - "completely shutdown"); - } else if (_auto_shutdown.is_set()) { - MemTracker::set_autoShutdown(_auto_shutdown.value()); + MemTracker::shutdown(); + output()->print_cr("Native memory tracking has been turned off"); + } else if (_statistics.value()) { + if (check_detail_tracking_level(output())) { + MemTracker::tuning_statistics(output()); + } } else { ShouldNotReachHere(); output()->print_cr("Unknown command"); @@ -181,3 +169,46 @@ } } +void NMTDCmd::report(bool summaryOnly, size_t scale_unit) { + MemBaseline baseline; + if (baseline.baseline(summaryOnly)) 
{ + if (summaryOnly) { + MemSummaryReporter rpt(baseline, output(), scale_unit); + rpt.report(); + } else { + MemDetailReporter rpt(baseline, output(), scale_unit); + rpt.report(); + } + } +} + +void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) { + MemBaseline& early_baseline = MemTracker::get_baseline(); + assert(early_baseline.baseline_type() != MemBaseline::Not_baselined, + "Not yet baselined"); + assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined, + "Not a detail baseline"); + + MemBaseline baseline; + if (baseline.baseline(summaryOnly)) { + if (summaryOnly) { + MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit); + rpt.report_diff(); + } else { + MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit); + rpt.report_diff(); + } + } +} + +bool NMTDCmd::check_detail_tracking_level(outputStream* out) { + if (MemTracker::tracking_level() == NMT_detail) { + return true; + } else if (MemTracker::cmdline_tracking_level() == NMT_detail) { + out->print_cr("Tracking level has been downgraded due to lack of resources"); + return false; + } else { + out->print_cr("Detail tracking is not enabled"); + return false; + } +} --- ./hotspot/src/share/vm/services/nmtDCmd.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/nmtDCmd.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,12 @@ #ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP #define SHARE_VM_SERVICES_NMT_DCMD_HPP +#if INCLUDE_NMT + #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" +#include "services/memBaseline.hpp" +#include "services/mallocTracker.hpp" /** * Native memory tracking DCmd implementation @@ -39,10 +43,7 @@ DCmdArgument _summary_diff; DCmdArgument _detail_diff; DCmdArgument _shutdown; - DCmdArgument _auto_shutdown; -#ifndef PRODUCT - DCmdArgument _debug; -#endif + DCmdArgument _statistics; DCmdArgument _scale; public: @@ -61,6 +62,17 @@ } static int num_arguments(); virtual void execute(DCmdSource source, TRAPS); + + private: + void report(bool summaryOnly, size_t scale); + void report_diff(bool summaryOnly, size_t scale); + + size_t get_scale(const char* scale) const; + + // check if NMT running at detail tracking level + bool check_detail_tracking_level(outputStream* out); }; +#endif // INCLUDE_NMT + #endif // SHARE_VM_SERVICES_NMT_DCMD_HPP --- ./hotspot/src/share/vm/services/runtimeService.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/runtimeService.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -46,6 +46,7 @@ PerfCounter* RuntimeService::_thread_interrupt_signaled_count = NULL; PerfCounter* RuntimeService::_interrupted_before_count = NULL; PerfCounter* RuntimeService::_interrupted_during_count = NULL; +double RuntimeService::_last_safepoint_sync_time_sec = 0.0; void RuntimeService::init() { // Make sure the VM version is initialized @@ -128,6 +129,7 @@ // update the time stamp to begin recording safepoint time _safepoint_timer.update(); + _last_safepoint_sync_time_sec = 0.0; if (UsePerfData) { _total_safepoints->inc(); if (_app_timer.is_updated()) { @@ -140,6 +142,9 @@ if (UsePerfData) { _sync_time_ticks->inc(_safepoint_timer.ticks_since_update()); } + if 
(PrintGCApplicationStoppedTime) { + _last_safepoint_sync_time_sec = last_safepoint_time_sec(); + } } void RuntimeService::record_safepoint_end() { @@ -155,8 +160,10 @@ gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->print_cr("Total time for which application threads " - "were stopped: %3.7f seconds", - last_safepoint_time_sec()); + "were stopped: %3.7f seconds, " + "Stopping threads took: %3.7f seconds", + last_safepoint_time_sec(), + _last_safepoint_sync_time_sec); } // update the time stamp to begin recording app time --- ./hotspot/src/share/vm/services/runtimeService.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/runtimeService.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -40,6 +40,7 @@ static TimeStamp _safepoint_timer; static TimeStamp _app_timer; + static double _last_safepoint_sync_time_sec; public: static void init(); --- ./hotspot/src/share/vm/services/threadService.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/services/threadService.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,6 +33,7 @@ #include "runtime/init.hpp" #include "runtime/thread.hpp" #include "runtime/vframe.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" #include "services/threadService.hpp" --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/virtualMemoryTracker.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +#include "runtime/threadCritical.hpp" +#include "services/virtualMemoryTracker.hpp" + +size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)]; + +void VirtualMemorySummary::initialize() { + assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check"); + // Use placement operator new to initialize static data area. 
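// The initializer below relies on placement new. An illustrative standalone
// sketch of the idiom (invented names, not HotSpot source): backing the
// singleton with a raw size_t array avoids the undefined ordering of static
// C++ constructors, because the object is only built in place when NMT
// initialization explicitly asks for it.

#include <new>
#include <stddef.h>

struct Counters {                     // stand-in for VirtualMemorySnapshot
  size_t reserved;
  size_t committed;
  Counters() : reserved(0), committed(0) { }
};

// Raw storage measured in size_t units, as CALC_OBJ_SIZE_IN_TYPE does for
// VirtualMemorySummary::_snapshot.
static size_t storage[(sizeof(Counters) + sizeof(size_t) - 1) / sizeof(size_t)];

static void initialize_counters() {
  ::new ((void*)storage) Counters();  // construct in place; no heap allocation
}

static Counters* counters() { return (Counters*)storage; }

// VirtualMemorySummary::initialize() does the same thing, continuing below: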
+ ::new ((void*)_snapshot) VirtualMemorySnapshot(); +} + +SortedLinkedList* VirtualMemoryTracker::_reserved_regions; + +int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) { + return r1.compare(r2); +} + +int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) { + return r1.compare(r2); +} + +bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + assert(contain_region(addr, size), "Not contain this region"); + + if (all_committed()) return true; + + CommittedMemoryRegion committed_rgn(addr, size, stack); + LinkedListNode* node = _committed_regions.find_node(committed_rgn); + if (node != NULL) { + CommittedMemoryRegion* rgn = node->data(); + if (rgn->same_region(addr, size)) { + return true; + } + + if (rgn->adjacent_to(addr, size)) { + // check if the next region covers this committed region, + // the regions may not be merged due to different call stacks + LinkedListNode* next = + node->next(); + if (next != NULL && next->data()->contain_region(addr, size)) { + if (next->data()->same_region(addr, size)) { + next->data()->set_call_stack(stack); + } + return true; + } + if (rgn->call_stack()->equals(stack)) { + VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag()); + // the two adjacent regions have the same call stack, merge them + rgn->expand_region(addr, size); + VirtualMemorySummary::record_committed_memory(rgn->size(), flag()); + return true; + } + VirtualMemorySummary::record_committed_memory(size, flag()); + if (rgn->base() > addr) { + return _committed_regions.insert_before(committed_rgn, node) != NULL; + } else { + return _committed_regions.insert_after(committed_rgn, node) != NULL; + } + } + assert(rgn->contain_region(addr, size), "Must cover this region"); + return true; + } else { + // New committed region + VirtualMemorySummary::record_committed_memory(size, flag()); + return add_committed_region(committed_rgn); + } +} + +void ReservedMemoryRegion::set_all_committed(bool b) { + if (all_committed() != b) { + _all_committed = b; + if (b) { + VirtualMemorySummary::record_committed_memory(size(), flag()); + } + } +} + +bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode* node, + address addr, size_t size) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + + CommittedMemoryRegion* rgn = node->data(); + assert(rgn->contain_region(addr, size), "Has to be contained"); + assert(!rgn->same_region(addr, size), "Can not be the same region"); + + if (rgn->base() == addr || + rgn->end() == addr + size) { + rgn->exclude_region(addr, size); + return true; + } else { + // split this region + address top =rgn->end(); + // use this region for lower part + size_t exclude_size = rgn->end() - addr; + rgn->exclude_region(addr, exclude_size); + + // higher part + address high_base = addr + size; + size_t high_size = top - high_base; + + CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack()); + LinkedListNode* high_node = _committed_regions.add(high_rgn); + assert(high_node == NULL || node->next() == high_node, "Should be right after"); + return (high_node != NULL); + } + + return false; +} + +bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { + // uncommit stack guard pages + if (flag() == mtThreadStack && !same_region(addr, sz)) { + return true; + } + + assert(addr != NULL, 
"Invalid address"); + assert(sz > 0, "Invalid size"); + + if (all_committed()) { + assert(_committed_regions.is_empty(), "Sanity check"); + assert(contain_region(addr, sz), "Reserved region does not contain this region"); + set_all_committed(false); + VirtualMemorySummary::record_uncommitted_memory(sz, flag()); + if (same_region(addr, sz)) { + return true; + } else { + CommittedMemoryRegion rgn(base(), size(), *call_stack()); + if (rgn.base() == addr || rgn.end() == (addr + sz)) { + rgn.exclude_region(addr, sz); + return add_committed_region(rgn); + } else { + // split this region + // top of the whole region + address top =rgn.end(); + // use this region for lower part + size_t exclude_size = rgn.end() - addr; + rgn.exclude_region(addr, exclude_size); + if (add_committed_region(rgn)) { + // higher part + address high_base = addr + sz; + size_t high_size = top - high_base; + CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK); + return add_committed_region(high_rgn); + } else { + return false; + } + } + } + } else { + // we have to walk whole list to remove the committed regions in + // specified range + LinkedListNode* head = + _committed_regions.head(); + LinkedListNode* prev = NULL; + VirtualMemoryRegion uncommitted_rgn(addr, sz); + + while (head != NULL && !uncommitted_rgn.is_empty()) { + CommittedMemoryRegion* crgn = head->data(); + // this committed region overlaps to region to uncommit + if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { + if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { + // find matched region, remove the node will do + VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag()); + _committed_regions.remove_after(prev); + return true; + } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { + // this committed region contains whole uncommitted region + VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag()); + return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size()); + } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) { + // this committed region has been uncommitted + size_t exclude_size = crgn->end() - uncommitted_rgn.base(); + uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size); + VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); + LinkedListNode* tmp = head; + head = head->next(); + _committed_regions.remove_after(prev); + continue; + } else if (crgn->contain_address(uncommitted_rgn.base())) { + size_t toUncommitted = crgn->end() - uncommitted_rgn.base(); + crgn->exclude_region(uncommitted_rgn.base(), toUncommitted); + uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted); + VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag()); + } else if (uncommitted_rgn.contain_address(crgn->base())) { + size_t toUncommitted = uncommitted_rgn.end() - crgn->base(); + crgn->exclude_region(crgn->base(), toUncommitted); + uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted, + toUncommitted); + VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag()); + } + } + prev = head; + head = head->next(); + } + } + + return true; +} + +void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) { + assert(addr != NULL, "Invalid address"); + + // split committed regions + LinkedListNode* head = + _committed_regions.head(); + LinkedListNode* prev = NULL; + + 
while (head != NULL) { + if (head->data()->base() >= addr) { + break; + } + prev = head; + head = head->next(); + } + + if (head != NULL) { + if (prev != NULL) { + prev->set_next(head->next()); + } else { + _committed_regions.set_head(NULL); + } + } + + rgn._committed_regions.set_head(head); +} + +size_t ReservedMemoryRegion::committed_size() const { + if (all_committed()) { + return size(); + } else { + size_t committed = 0; + LinkedListNode* head = + _committed_regions.head(); + while (head != NULL) { + committed += head->data()->size(); + head = head->next(); + } + return committed; + } +} + +void ReservedMemoryRegion::set_flag(MEMFLAGS f) { + assert((flag() == mtNone || flag() == f), "Overwrite memory type"); + if (flag() != f) { + VirtualMemorySummary::move_reserved_memory(flag(), f, size()); + VirtualMemorySummary::move_committed_memory(flag(), f, committed_size()); + _flag = f; + } +} + +bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { + if (level >= NMT_summary) { + VirtualMemorySummary::initialize(); + } + return true; +} + +bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) { + if (level >= NMT_summary) { + _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT) + SortedLinkedList(); + return (_reserved_regions != NULL); + } + return true; +} + +bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size, + const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) { + assert(base_addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + assert(_reserved_regions != NULL, "Sanity check"); + ReservedMemoryRegion rgn(base_addr, size, stack, flag); + ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); + LinkedListNode* node; + if (reserved_rgn == NULL) { + VirtualMemorySummary::record_reserved_memory(size, flag); + node = _reserved_regions->add(rgn); + if (node != NULL) { + node->data()->set_all_committed(all_committed); + return true; + } else { + return false; + } + } else { + if (reserved_rgn->same_region(base_addr, size)) { + reserved_rgn->set_call_stack(stack); + reserved_rgn->set_flag(flag); + return true; + } else if (reserved_rgn->adjacent_to(base_addr, size)) { + VirtualMemorySummary::record_reserved_memory(size, flag); + reserved_rgn->expand_region(base_addr, size); + reserved_rgn->set_call_stack(stack); + return true; + } else { + // Overlapped reservation. + // It can happen when the regions are thread stacks, as JNI + // thread does not detach from VM before exits, and leads to + // leak JavaThread object + if (reserved_rgn->flag() == mtThreadStack) { + guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached"); + // Overwrite with new region + + // Release old region + VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag()); + VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag()); + + // Add new region + VirtualMemorySummary::record_reserved_memory(rgn.size(), flag); + + *reserved_rgn = rgn; + return true; + } + + // CDS mapping region. + // CDS reserves the whole region for mapping CDS archive, then maps each section into the region. + // NMT reports CDS as a whole. 
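// Note: this is the second of only two overlaps treated as benign. A stale
// mtThreadStack region (handled above) means a JNI thread exited without
// detaching, so its record is simply replaced; the mtClassShared case below
// accepts the archive's section mappings inside the original reservation.
// Any other overlap is a tracking bug, hence the ShouldNotReachHere() after it.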
+ if (reserved_rgn->flag() == mtClassShared) { + assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region"); + return true; + } + + ShouldNotReachHere(); + return false; + } + } +} + +void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) { + assert(addr != NULL, "Invalid address"); + assert(_reserved_regions != NULL, "Sanity check"); + + ReservedMemoryRegion rgn(addr, 1); + ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); + if (reserved_rgn != NULL) { + assert(reserved_rgn->contain_address(addr), "Containment"); + if (reserved_rgn->flag() != flag) { + assert(reserved_rgn->flag() == mtNone, "Overwrite memory type"); + reserved_rgn->set_flag(flag); + } + } +} + +bool VirtualMemoryTracker::add_committed_region(address addr, size_t size, + const NativeCallStack& stack) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + assert(_reserved_regions != NULL, "Sanity check"); + + ReservedMemoryRegion rgn(addr, size); + ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); + + assert(reserved_rgn != NULL, "No reserved region"); + assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); + return reserved_rgn->add_committed_region(addr, size, stack); +} + +bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + assert(_reserved_regions != NULL, "Sanity check"); + + ReservedMemoryRegion rgn(addr, size); + ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); + assert(reserved_rgn != NULL, "No reserved region"); + assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); + return reserved_rgn->remove_uncommitted_region(addr, size); +} + +bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + assert(_reserved_regions != NULL, "Sanity check"); + + ReservedMemoryRegion rgn(addr, size); + ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); + + assert(reserved_rgn != NULL, "No reserved region"); + + // uncommit regions within the released region + if (!reserved_rgn->remove_uncommitted_region(addr, size)) { + return false; + } + + + VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag()); + + if (reserved_rgn->same_region(addr, size)) { + return _reserved_regions->remove(rgn); + } else { + assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); + if (reserved_rgn->base() == addr || + reserved_rgn->end() == addr + size) { + reserved_rgn->exclude_region(addr, size); + return true; + } else { + address top = reserved_rgn->end(); + address high_base = addr + size; + ReservedMemoryRegion high_rgn(high_base, top - high_base, + *reserved_rgn->call_stack(), reserved_rgn->flag()); + + // use original region for lower region + reserved_rgn->exclude_region(addr, top - addr); + LinkedListNode* new_rgn = _reserved_regions->add(high_rgn); + if (new_rgn == NULL) { + return false; + } else { + reserved_rgn->move_committed_regions(addr, *new_rgn->data()); + return true; + } + } + } +} + + +bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { + assert(_reserved_regions != NULL, "Sanity check"); + ThreadCritical tc; + // Check that the _reserved_regions haven't been deleted. 
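// The NULL re-check below is deliberate: transition(), at the end of this
// file, deletes _reserved_regions under the same ThreadCritical lock when
// tracking drops to NMT_minimal, so holding the lock here makes walking and
// teardown mutually exclusive.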
+ if (_reserved_regions != NULL) { + LinkedListNode* head = _reserved_regions->head(); + while (head != NULL) { + const ReservedMemoryRegion* rgn = head->peek(); + if (!walker->do_allocation_site(rgn)) { + return false; + } + head = head->next(); + } + } + return true; +} + +// Transition virtual memory tracking level. +bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { + assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything"); + if (to == NMT_minimal) { + assert(from == NMT_summary || from == NMT_detail, "Just check"); + // Clean up virtual memory tracking data structures. + ThreadCritical tc; + // Check for potential race with other thread calling transition + if (_reserved_regions != NULL) { + delete _reserved_regions; + _reserved_regions = NULL; + } + } + + return true; +} + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/services/virtualMemoryTracker.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP +#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP + +#if INCLUDE_NMT + +#include "memory/allocation.hpp" +#include "services/allocationSite.hpp" +#include "services/nmtCommon.hpp" +#include "utilities/linkedlist.hpp" +#include "utilities/nativeCallStack.hpp" +#include "utilities/ostream.hpp" + + +/* + * Virtual memory counter + */ +class VirtualMemory VALUE_OBJ_CLASS_SPEC { + private: + size_t _reserved; + size_t _committed; + + public: + VirtualMemory() : _reserved(0), _committed(0) { } + + inline void reserve_memory(size_t sz) { _reserved += sz; } + inline void commit_memory (size_t sz) { + _committed += sz; + assert(_committed <= _reserved, "Sanity check"); + } + + inline void release_memory (size_t sz) { + assert(_reserved >= sz, "Negative amount"); + _reserved -= sz; + } + + inline void uncommit_memory(size_t sz) { + assert(_committed >= sz, "Negative amount"); + _committed -= sz; + } + + inline size_t reserved() const { return _reserved; } + inline size_t committed() const { return _committed; } +}; + +// Virtual memory allocation site, keeps track where the virtual memory is reserved. 
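// Usage sketch for the VirtualMemory counter above (values invented):
//   VirtualMemory vm;
//   vm.reserve_memory(4 * M);     // reserved = 4M, committed = 0
//   vm.commit_memory(1 * M);      // committed = 1M; asserts committed <= reserved
//   vm.uncommit_memory(512 * K);  // committed = 512K
//   vm.release_memory(2 * M);     // reserved = 2M; asserts against underflow
// The class below ties one such counter pair to the call stack that made
// the reservation.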
+class VirtualMemoryAllocationSite : public AllocationSite { + public: + VirtualMemoryAllocationSite(const NativeCallStack& stack) : + AllocationSite(stack) { } + + inline void reserve_memory(size_t sz) { data()->reserve_memory(sz); } + inline void commit_memory (size_t sz) { data()->commit_memory(sz); } + inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); } + inline void release_memory(size_t sz) { data()->release_memory(sz); } + inline size_t reserved() const { return peek()->reserved(); } + inline size_t committed() const { return peek()->committed(); } +}; + +class VirtualMemorySummary; + +// This class represents a snapshot of virtual memory at a given time. +// The latest snapshot is saved in a static area. +class VirtualMemorySnapshot : public ResourceObj { + friend class VirtualMemorySummary; + + private: + VirtualMemory _virtual_memory[mt_number_of_types]; + + public: + inline VirtualMemory* by_type(MEMFLAGS flag) { + int index = NMTUtil::flag_to_index(flag); + return &_virtual_memory[index]; + } + + inline VirtualMemory* by_index(int index) { + assert(index >= 0, "Index out of bound"); + assert(index < mt_number_of_types, "Index out of bound"); + return &_virtual_memory[index]; + } + + inline size_t total_reserved() const { + size_t amount = 0; + for (int index = 0; index < mt_number_of_types; index ++) { + amount += _virtual_memory[index].reserved(); + } + return amount; + } + + inline size_t total_committed() const { + size_t amount = 0; + for (int index = 0; index < mt_number_of_types; index ++) { + amount += _virtual_memory[index].committed(); + } + return amount; + } + + void copy_to(VirtualMemorySnapshot* s) { + for (int index = 0; index < mt_number_of_types; index ++) { + s->_virtual_memory[index] = _virtual_memory[index]; + } + } +}; + +class VirtualMemorySummary : AllStatic { + public: + static void initialize(); + + static inline void record_reserved_memory(size_t size, MEMFLAGS flag) { + as_snapshot()->by_type(flag)->reserve_memory(size); + } + + static inline void record_committed_memory(size_t size, MEMFLAGS flag) { + as_snapshot()->by_type(flag)->commit_memory(size); + } + + static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) { + as_snapshot()->by_type(flag)->uncommit_memory(size); + } + + static inline void record_released_memory(size_t size, MEMFLAGS flag) { + as_snapshot()->by_type(flag)->release_memory(size); + } + + // Move virtual memory from one memory type to another. + // Virtual memory can be reserved before it is associated with a memory type, and tagged + // as 'unknown'. Once the memory is tagged, the virtual memory will be moved from 'unknown' + // type to specified memory type. 
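// Note: "moving" here is pure bookkeeping; no pages change state. Each
// helper below debits the counters of the old memory type and credits the
// new one, which is how ReservedMemoryRegion::set_flag() retags a region
// that was reserved before its eventual owner was known.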
+ static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) { + as_snapshot()->by_type(from)->release_memory(size); + as_snapshot()->by_type(to)->reserve_memory(size); + } + + static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) { + as_snapshot()->by_type(from)->uncommit_memory(size); + as_snapshot()->by_type(to)->commit_memory(size); + } + + static inline void snapshot(VirtualMemorySnapshot* s) { + as_snapshot()->copy_to(s); + } + + static VirtualMemorySnapshot* as_snapshot() { + return (VirtualMemorySnapshot*)_snapshot; + } + + private: + static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)]; +}; + + + +/* + * A virtual memory region + */ +class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC { + private: + address _base_address; + size_t _size; + + public: + VirtualMemoryRegion(address addr, size_t size) : + _base_address(addr), _size(size) { + assert(addr != NULL, "Invalid address"); + assert(size > 0, "Invalid size"); + } + + inline address base() const { return _base_address; } + inline address end() const { return base() + size(); } + inline size_t size() const { return _size; } + + inline bool is_empty() const { return size() == 0; } + + inline bool contain_address(address addr) const { + return (addr >= base() && addr < end()); + } + + + inline bool contain_region(address addr, size_t size) const { + return contain_address(addr) && contain_address(addr + size - 1); + } + + inline bool same_region(address addr, size_t sz) const { + return (addr == base() && sz == size()); + } + + + inline bool overlap_region(address addr, size_t sz) const { + VirtualMemoryRegion rgn(addr, sz); + return contain_address(addr) || + contain_address(addr + sz - 1) || + rgn.contain_address(base()) || + rgn.contain_address(end() - 1); + } + + inline bool adjacent_to(address addr, size_t sz) const { + return (addr == end() || (addr + sz) == base()); + } + + void exclude_region(address addr, size_t sz) { + assert(contain_region(addr, sz), "Not containment"); + assert(addr == base() || addr + sz == end(), "Can not exclude from middle"); + size_t new_size = size() - sz; + + if (addr == base()) { + set_base(addr + sz); + } + set_size(new_size); + } + + void expand_region(address addr, size_t sz) { + assert(adjacent_to(addr, sz), "Not adjacent regions"); + if (base() == addr + sz) { + set_base(addr); + } + set_size(size() + sz); + } + + protected: + void set_base(address base) { + assert(base != NULL, "Sanity check"); + _base_address = base; + } + + void set_size(size_t size) { + assert(size > 0, "Sanity check"); + _size = size; + } +}; + + +class CommittedMemoryRegion : public VirtualMemoryRegion { + private: + NativeCallStack _stack; + + public: + CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) : + VirtualMemoryRegion(addr, size), _stack(stack) { } + + inline int compare(const CommittedMemoryRegion& rgn) const { + if (overlap_region(rgn.base(), rgn.size()) || + adjacent_to (rgn.base(), rgn.size())) { + return 0; + } else { + if (base() == rgn.base()) { + return 0; + } else if (base() > rgn.base()) { + return 1; + } else { + return -1; + } + } + } + + inline bool equals(const CommittedMemoryRegion& rgn) const { + return compare(rgn) == 0; + } + + inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; } + inline const NativeCallStack* call_stack() const { return &_stack; } +}; + + +typedef LinkedListIterator CommittedRegionIterator; + +int compare_committed_region(const CommittedMemoryRegion&, 
const CommittedMemoryRegion&); +class ReservedMemoryRegion : public VirtualMemoryRegion { + private: + SortedLinkedList + _committed_regions; + + NativeCallStack _stack; + MEMFLAGS _flag; + + bool _all_committed; + + public: + ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack, + MEMFLAGS flag = mtNone) : + VirtualMemoryRegion(base, size), _stack(stack), _flag(flag), + _all_committed(false) { } + + + ReservedMemoryRegion(address base, size_t size) : + VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone), + _all_committed(false) { } + + // Copy constructor + ReservedMemoryRegion(const ReservedMemoryRegion& rr) : + VirtualMemoryRegion(rr.base(), rr.size()) { + *this = rr; + } + + inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; } + inline const NativeCallStack* call_stack() const { return &_stack; } + + void set_flag(MEMFLAGS flag); + inline MEMFLAGS flag() const { return _flag; } + + inline int compare(const ReservedMemoryRegion& rgn) const { + if (overlap_region(rgn.base(), rgn.size())) { + return 0; + } else { + if (base() == rgn.base()) { + return 0; + } else if (base() > rgn.base()) { + return 1; + } else { + return -1; + } + } + } + + inline bool equals(const ReservedMemoryRegion& rgn) const { + return compare(rgn) == 0; + } + + bool add_committed_region(address addr, size_t size, const NativeCallStack& stack); + bool remove_uncommitted_region(address addr, size_t size); + + size_t committed_size() const; + + // move committed regions that higher than specified address to + // the new region + void move_committed_regions(address addr, ReservedMemoryRegion& rgn); + + inline bool all_committed() const { return _all_committed; } + void set_all_committed(bool b); + + CommittedRegionIterator iterate_committed_regions() const { + return CommittedRegionIterator(_committed_regions.head()); + } + + ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) { + set_base(other.base()); + set_size(other.size()); + + _stack = *other.call_stack(); + _flag = other.flag(); + _all_committed = other.all_committed(); + if (other.all_committed()) { + set_all_committed(true); + } else { + CommittedRegionIterator itr = other.iterate_committed_regions(); + const CommittedMemoryRegion* rgn = itr.next(); + while (rgn != NULL) { + _committed_regions.add(*rgn); + rgn = itr.next(); + } + } + return *this; + } + + private: + // The committed region contains the uncommitted region, subtract the uncommitted + // region from this committed region + bool remove_uncommitted_region(LinkedListNode* node, + address addr, size_t sz); + + bool add_committed_region(const CommittedMemoryRegion& rgn) { + assert(rgn.base() != NULL, "Invalid base address"); + assert(size() > 0, "Invalid size"); + return _committed_regions.add(rgn) != NULL; + } +}; + +int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2); + +class VirtualMemoryWalker : public StackObj { + public: + virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; } +}; + +// Main class called from MemTracker to track virtual memory allocations, commits and releases. 
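// A hypothetical walker (not part of the patch), to show the callback
// protocol declared above: do_allocation_site() is invoked once per
// reserved region, and returning false aborts the walk, which is why
// walk_virtual_memory() in the .cpp propagates the result.
class ReservedRegionCounter : public VirtualMemoryWalker {
 private:
  size_t _count;
 public:
  ReservedRegionCounter() : _count(0) { }
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    _count++;     // a real walker would inspect rgn->flag(), rgn->size(), ...
    return true;  // keep walking
  }
  size_t count() const { return _count; }
};
// It would be driven by the tracker declared next:
//   ReservedRegionCounter counter;
//   VirtualMemoryTracker::walk_virtual_memory(&counter);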
+class VirtualMemoryTracker : AllStatic { + public: + static bool initialize(NMT_TrackingLevel level); + + // Late phase initialization + static bool late_initialize(NMT_TrackingLevel level); + + static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, + MEMFLAGS flag = mtNone, bool all_committed = false); + + static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack); + static bool remove_uncommitted_region (address base_addr, size_t size); + static bool remove_released_region (address base_addr, size_t size); + static void set_reserved_region_type (address addr, MEMFLAGS flag); + + // Walk virtual memory data structure for creating baseline, etc. + static bool walk_virtual_memory(VirtualMemoryWalker* walker); + + static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to); + + private: + static SortedLinkedList* _reserved_regions; +}; + + +#endif // INCLUDE_NMT + +#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP --- ./hotspot/src/share/vm/trace/noTraceBackend.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/noTraceBackend.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -41,4 +41,4 @@ typedef NoTraceBackend Tracing; -#endif +#endif // SHARE_VM_TRACE_NOTRACEBACKEND_HPP --- ./hotspot/src/share/vm/trace/traceBackend.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceBackend.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -25,9 +25,7 @@ #define SHARE_VM_TRACE_TRACEBACKEND_HPP #include "utilities/macros.hpp" - #if INCLUDE_TRACE - #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "trace/traceTime.hpp" @@ -58,9 +56,7 @@ typedef TraceBackend Tracing; -#else /* INCLUDE_TRACE */ - +#else // !INCLUDE_TRACE #include "trace/noTraceBackend.hpp" - -#endif /* INCLUDE_TRACE */ -#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */ +#endif // INCLUDE_TRACE +#endif // SHARE_VM_TRACE_TRACEBACKEND_HPP --- ./hotspot/src/share/vm/trace/traceEvent.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceEvent.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -33,7 +33,6 @@ }; #if INCLUDE_TRACE - #include "trace/traceBackend.hpp" #include "trace/tracing.hpp" #include "tracefiles/traceEventIds.hpp" @@ -154,6 +153,5 @@ } }; -#endif /* INCLUDE_TRACE */ - -#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */ +#endif // INCLUDE_TRACE +#endif // SHARE_VM_TRACE_TRACEEVENT_HPP --- ./hotspot/src/share/vm/trace/traceEventClasses.xsl Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceEventClasses.xsl Wed Feb 04 12:14:39 2015 -0800 @@ -41,17 +41,14 @@ #include "trace/traceEvent.hpp" #include "utilities/macros.hpp" #include "utilities/ticks.hpp" - #if INCLUDE_TRACE - - #include "trace/traceStream.hpp" #include "utilities/ostream.hpp" -#else +#else // !INCLUDE_TRACE class TraceEvent { public: @@ -65,9 +62,8 @@ -#endif - -#endif +#endif // INCLUDE_TRACE +#endif // TRACEFILES_TRACEEVENTCLASSES_HPP --- ./hotspot/src/share/vm/trace/traceEventIds.xsl Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceEventIds.xsl Wed Feb 04 12:14:39 2015 -0800 @@ -29,13 +29,11 @@ -#ifndef TRACEFILES_JFREVENTIDS_HPP -#define TRACEFILES_JFREVENTIDS_HPP +#ifndef TRACEFILES_TRACEEVENTIDS_HPP +#define TRACEFILES_TRACEEVENTIDS_HPP #include "utilities/macros.hpp" - #if INCLUDE_TRACE - #include "trace/traceDataTypes.hpp" /** @@ -67,8 +65,8 @@ typedef enum TraceEventId TraceEventId; typedef enum TraceStructId TraceStructId; -#endif -#endif +#endif // INCLUDE_TRACE +#endif // TRACEFILES_TRACEEVENTIDS_HPP --- 
./hotspot/src/share/vm/trace/traceMacros.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceMacros.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP -#define SHARE_VM_TRACE_TRACE_MACRO_HPP +#ifndef SHARE_VM_TRACE_TRACEMACROS_HPP +#define SHARE_VM_TRACE_TRACEMACROS_HPP #define EVENT_THREAD_EXIT(thread) #define EVENT_THREAD_DESTRUCT(thread) @@ -41,4 +41,4 @@ #define TRACE_TEMPLATES(template) #define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) -#endif +#endif // SHARE_VM_TRACE_TRACEMACROS_HPP --- ./hotspot/src/share/vm/trace/traceStream.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceStream.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,9 +26,7 @@ #define SHARE_VM_TRACE_TRACESTREAM_HPP #include "utilities/macros.hpp" - #if INCLUDE_TRACE - #include "oops/klass.hpp" #include "oops/method.hpp" #include "oops/symbol.hpp" @@ -117,5 +115,5 @@ } }; -#endif /* INCLUDE_TRACE */ -#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */ +#endif // INCLUDE_TRACE +#endif // SHARE_VM_TRACE_TRACESTREAM_HPP --- ./hotspot/src/share/vm/trace/traceTypes.xsl Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/traceTypes.xsl Wed Feb 04 12:14:39 2015 -0800 @@ -29,15 +29,14 @@ -#ifndef TRACEFILES_JFRTYPES_HPP -#define TRACEFILES_JFRTYPES_HPP +#ifndef TRACEFILES_TRACETYPES_HPP +#define TRACEFILES_TRACETYPES_HPP #include "oops/symbol.hpp" #include "trace/traceDataTypes.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ticks.hpp" - enum JVMContentType { _not_a_content_type = (JVM_CONTENT_TYPES_START - 1), @@ -58,7 +57,7 @@ }; /** - * Create typedefs for the JRA types: + * Create typedefs for the TRACE types: * typedef s8 TYPE_LONG; * typedef s4 TYPE_INTEGER; * typedef const char * TYPE_STRING; @@ -68,7 +67,7 @@ typedef TYPE_; -#endif // JFRFILES_JFRTYPES_HPP +#endif // TRACEFILES_TRACETYPES_HPP --- ./hotspot/src/share/vm/trace/tracetypes.xml Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/trace/tracetypes.xml Wed Feb 04 12:14:39 2015 -0800 @@ -98,6 +98,7 @@ + ) + MAX2(length - 1, 0) * sizeof(T); } + // WhiteBox API helper. + // Can't distinguish between array of length 0 and length 1, + // will always return 0 in those cases. + static int bytes_to_length(size_t bytes) { + assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now"); + + if (sizeof(Array) >= bytes) { + return 0; + } + + size_t left = bytes - sizeof(Array); + assert(is_size_aligned(left, sizeof(T)), "Must be"); + + size_t elements = left / sizeof(T); + assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT "doesn't fit into an int.", elements)); + + int length = (int)elements; + + assert((size_t)size(length) * BytesPerWord == bytes, + err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, + bytes, (size_t)size(length) * BytesPerWord)); + + return length; + } + explicit Array(int length) : _length(length) { assert(length >= 0, "illegal length"); } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/chunkedList.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "utilities/chunkedList.hpp" +#include "utilities/debug.hpp" + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +template +class TestChunkedList { + typedef ChunkedList ChunkedListT; + + public: + static void testEmpty() { + ChunkedListT buffer; + assert(buffer.size() == 0, "assert"); + } + + static void testFull() { + ChunkedListT buffer; + for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) { + buffer.push((T)i); + } + assert(buffer.size() == ChunkedListT::BufferSize, "assert"); + assert(buffer.is_full(), "assert"); + } + + static void testSize() { + ChunkedListT buffer; + for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) { + assert(buffer.size() == i, "assert"); + buffer.push((T)i); + assert(buffer.size() == i + 1, "assert"); + } + } + + static void testClear() { + ChunkedListT buffer; + + buffer.clear(); + assert(buffer.size() == 0, "assert"); + + for (uintptr_t i = 0; i < ChunkedListT::BufferSize / 2; i++) { + buffer.push((T)i); + } + buffer.clear(); + assert(buffer.size() == 0, "assert"); + + for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) { + buffer.push((T)i); + } + buffer.clear(); + assert(buffer.size() == 0, "assert"); + } + + static void testAt() { + ChunkedListT buffer; + + for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) { + buffer.push((T)i); + assert(buffer.at(i) == (T)i, "assert"); + } + + for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) { + assert(buffer.at(i) == (T)i, "assert"); + } + } + + static void test() { + testEmpty(); + testFull(); + testSize(); + testClear(); + testAt(); + } +}; + +class Metadata; + +void TestChunkedList_test() { + TestChunkedList::test(); + TestChunkedList::test(); +} + +#endif --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/chunkedList.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_UTILITIES_CHUNKED_LIST_HPP +#define SHARE_VM_UTILITIES_CHUNKED_LIST_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +template class ChunkedList : public CHeapObj { + template friend class TestChunkedList; + + static const size_t BufferSize = 64; + + T _values[BufferSize]; + T* _top; + + ChunkedList* _next_used; + ChunkedList* _next_free; + + T const * end() const { + return &_values[BufferSize]; + } + + public: + ChunkedList() : _top(_values), _next_used(NULL), _next_free(NULL) {} + + bool is_full() const { + return _top == end(); + } + + void clear() { + _top = _values; + // Don't clear the next pointers since that would interfere + // with other threads trying to iterate through the lists. + } + + void push(T m) { + assert(!is_full(), "Buffer is full"); + *_top = m; + _top++; + } + + void set_next_used(ChunkedList* buffer) { _next_used = buffer; } + void set_next_free(ChunkedList* buffer) { _next_free = buffer; } + + ChunkedList* next_used() const { return _next_used; } + ChunkedList* next_free() const { return _next_free; } + + size_t size() const { + return pointer_delta(_top, _values, sizeof(T)); + } + + T at(size_t i) { + assert(i < size(), err_msg("IOOBE i: " SIZE_FORMAT " size(): " SIZE_FORMAT, i, size())); + return _values[i]; + } +}; + +#endif // SHARE_VM_UTILITIES_CHUNKED_LIST_HPP --- ./hotspot/src/share/vm/utilities/debug.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/debug.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -266,17 +266,19 @@ "native memory for metadata", "shared read only space", "shared read write space", - "shared miscellaneous data space" + "shared miscellaneous data space", + "shared miscellaneous code space" }; static const char* flag[] = { "Metaspace", "SharedReadOnlySize", "SharedReadWriteSize", - "SharedMiscDataSize" + "SharedMiscDataSize", + "SharedMiscCodeSize" }; warning("\nThe %s is not large enough\n" - "to preload requested classes. Use -XX:%s=\n" + "to preload requested classes. Use -XX:%s=\n" "to increase the initial size of %s.\n", name[shared_space], flag[shared_space], name[shared_space]); exit(2); --- ./hotspot/src/share/vm/utilities/debug.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/debug.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -246,7 +246,8 @@ SharedPermGen, SharedReadOnly, SharedReadWrite, - SharedMiscData + SharedMiscData, + SharedMiscCode }; void report_out_of_shared_space(SharedSpaceType space_type); --- ./hotspot/src/share/vm/utilities/exceptions.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/exceptions.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -85,9 +85,13 @@ #endif // ASSERT if (thread->is_VM_thread() - || thread->is_Compiler_thread() ) { + || thread->is_Compiler_thread() + || DumpSharedSpaces ) { // We do not care what kind of exception we get for the vm-thread or a thread which // is compiling. We just install a dummy exception object + // + // We also cannot throw a proper exception when dumping, because we cannot run + // Java bytecodes now. A dummy exception will suffice. 
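// Note: Universe::vm_exception() is a pre-allocated exception object, so
// installing it below is safe even in contexts that cannot execute
// bytecode: the VM thread, compiler threads, and now -Xshare:dump
// (DumpSharedSpaces), where classes are being archived rather than run.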
thread->set_pending_exception(Universe::vm_exception(), file, line); return true; } @@ -108,9 +112,13 @@ } if (thread->is_VM_thread() - || thread->is_Compiler_thread() ) { + || thread->is_Compiler_thread() + || DumpSharedSpaces ) { // We do not care what kind of exception we get for the vm-thread or a thread which // is compiling. We just install a dummy exception object + // + // We also cannot throw a proper exception when dumping, because we cannot run + // Java bytecodes now. A dummy exception will suffice. thread->set_pending_exception(Universe::vm_exception(), file, line); return true; } --- ./hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -558,6 +558,27 @@ return fabs(value); } +//---------------------------------------------------------------------------------------------------- +// Special casts +// Cast floats into same-size integers and vice-versa w/o changing bit-pattern +typedef union { + jfloat f; + jint i; +} FloatIntConv; + +typedef union { + jdouble d; + jlong l; + julong ul; +} DoubleLongConv; + +inline jint jint_cast (jfloat x) { return ((FloatIntConv*)&x)->i; } +inline jfloat jfloat_cast (jint x) { return ((FloatIntConv*)&x)->f; } + +inline jlong jlong_cast (jdouble x) { return ((DoubleLongConv*)&x)->l; } +inline julong julong_cast (jdouble x) { return ((DoubleLongConv*)&x)->ul; } +inline jdouble jdouble_cast (jlong x) { return ((DoubleLongConv*)&x)->d; } + inline jint low (jlong value) { return jint(value); } inline jint high(jlong value) { return jint(value >> 32); } --- ./hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -167,17 +167,6 @@ typedef uint32_t juint; typedef uint64_t julong; -//---------------------------------------------------------------------------------------------------- -// Special (possibly not-portable) casts -// Cast floats into same-size integers and vice-versa w/o changing bit-pattern -// %%%%%% These seem like standard C++ to me--how about factoring them out? 
- Ungar - -inline jint jint_cast (jfloat x) { return *(jint* )&x; } -inline jlong jlong_cast (jdouble x) { return *(jlong* )&x; } -inline julong julong_cast (jdouble x) { return *(julong* )&x; } - -inline jfloat jfloat_cast (jint x) { return *(jfloat* )&x; } -inline jdouble jdouble_cast(jlong x) { return *(jdouble*)&x; } //---------------------------------------------------------------------------------------------------- // Constant for jlong (specifying an long long canstant is C++ compiler specific) --- ./hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -183,15 +183,6 @@ typedef unsigned int juint; typedef unsigned long long julong; -//---------------------------------------------------------------------------------------------------- -// Special (possibly not-portable) casts -// Cast floats into same-size integers and vice-versa w/o changing bit-pattern - -inline jint jint_cast (jfloat x) { return *(jint* )&x; } -inline jlong jlong_cast (jdouble x) { return *(jlong* )&x; } - -inline jfloat jfloat_cast (jint x) { return *(jfloat* )&x; } -inline jdouble jdouble_cast(jlong x) { return *(jdouble*)&x; } //---------------------------------------------------------------------------------------------------- // Constant for jlong (specifying an long long constant is C++ compiler specific) --- ./hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -116,16 +116,6 @@ typedef unsigned int juint; typedef unsigned __int64 julong; -//---------------------------------------------------------------------------------------------------- -// Special (possibly not-portable) casts -// Cast floats into same-size integers and vice-versa w/o changing bit-pattern - -inline jint jint_cast (jfloat x) { return *(jint* )&x; } -inline jlong jlong_cast (jdouble x) { return *(jlong* )&x; } - -inline jfloat jfloat_cast (jint x) { return *(jfloat* )&x; } -inline jdouble jdouble_cast(jlong x) { return *(jdouble*)&x; } - //---------------------------------------------------------------------------------------------------- // Non-standard stdlib-like stuff: --- ./hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -114,16 +114,6 @@ typedef uint32_t juint; typedef uint64_t julong; -//---------------------------------------------------------------------------------------------------- -// Special (possibly not-portable) casts -// Cast floats into same-size integers and vice-versa w/o changing bit-pattern -// %%%%%% These seem like standard C++ to me--how about factoring them out? 
--- ./hotspot/src/share/vm/utilities/growableArray.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/utilities/growableArray.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -147,6 +147,9 @@
   }
 };

+template <typename E> class GrowableArrayIterator;
+template <typename E, typename UnaryPredicate> class GrowableArrayFilterIterator;
+
 template<class E> class GrowableArray : public GenericGrowableArray {
   friend class VMStructs;

@@ -243,6 +246,14 @@
     return _data[_len-1];
   }

+  GrowableArrayIterator<E> begin() const {
+    return GrowableArrayIterator<E>(this, 0);
+  }
+
+  GrowableArrayIterator<E> end() const {
+    return GrowableArrayIterator<E>(this, length());
+  }
+
   void push(const E& elem) { append(elem); }

   E pop() {
@@ -338,6 +349,7 @@

   // inserts the given element before the element at index i
   void insert_before(const int idx, const E& elem) {
+    assert(0 <= idx && idx <= _len, "illegal index");
     check_nesting();
     if (_len == _max) grow(_len);
     for (int j = _len - 1; j >= idx; j--) {
@@ -349,7 +361,7 @@

   void appendAll(const GrowableArray<E>* l) {
     for (int i = 0; i < l->_len; i++) {
-      raw_at_put_grow(_len, l->_data[i], 0);
+      raw_at_put_grow(_len, l->_data[i], E());
     }
   }

@@ -412,4 +424,83 @@
   tty->print("}\n");
 }

+// Custom STL-style iterator to iterate over GrowableArrays
+// It is constructed by invoking GrowableArray::begin() and GrowableArray::end()
+template <typename E> class GrowableArrayIterator : public StackObj {
+  friend class GrowableArray<E>;
+  template <typename F, typename UnaryPredicate> friend class GrowableArrayFilterIterator;
+
+ private:
+  const GrowableArray<E>* _array; // GrowableArray we iterate over
+  int _position;                  // The current position in the GrowableArray
+
+  // Private constructor used in GrowableArray::begin() and GrowableArray::end()
+  GrowableArrayIterator(const GrowableArray<E>* array, int position) : _array(array), _position(position) {
+    assert(0 <= position && position <= _array->length(), "illegal position");
+  }
+
+ public:
+  GrowableArrayIterator<E>& operator++() { ++_position; return *this; }
+  E operator*()                          { return _array->at(_position); }
+
+  bool operator==(const GrowableArrayIterator<E>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayIterator<E>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+};
+
+// Custom STL-style iterator to iterate over elements of a GrowableArray that satisfy a given predicate
+template <typename E, typename UnaryPredicate> class GrowableArrayFilterIterator : public StackObj {
+  friend class GrowableArray<E>;
+
+ private:
+  const GrowableArray<E>* _array; // GrowableArray we iterate over
+  int _position;                  // Current position in the GrowableArray
+  UnaryPredicate _predicate;      // Unary predicate the elements of the GrowableArray should satisfy
+
+ public:
+  GrowableArrayFilterIterator(const GrowableArrayIterator<E>& begin, UnaryPredicate filter_predicate)
+   : _array(begin._array), _position(begin._position), _predicate(filter_predicate) {
+    // Advance to first element satisfying the predicate
+    while(_position != _array->length() && !_predicate(_array->at(_position))) {
+      ++_position;
+    }
+  }
+
+  GrowableArrayFilterIterator<E, UnaryPredicate>& operator++() {
+    do {
+      // Advance to next element satisfying the predicate
+      ++_position;
+    } while(_position != _array->length() && !_predicate(_array->at(_position)));
+    return *this;
+  }
+
+  E operator*() { return _array->at(_position); }
+
+  bool operator==(const GrowableArrayIterator<E>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayIterator<E>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+
+  bool operator==(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+};
+
 #endif // SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
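The whole filter iterator reduces to two advance loops: the constructor skips forward to the first element satisfying the predicate, and operator++ repeats the same scan from the next position. A compilable stand-alone sketch of that logic, substituting std::vector for GrowableArray (class and member names here are illustrative, not HotSpot's):

#include <cstddef>
#include <cstdio>
#include <vector>

// Minimal re-creation of the filter-iterator advance logic over std::vector.
template <typename E, typename UnaryPredicate>
class FilterIterator {
  const std::vector<E>* _array;
  size_t _position;
  UnaryPredicate _predicate;
 public:
  FilterIterator(const std::vector<E>* array, UnaryPredicate pred)
      : _array(array), _position(0), _predicate(pred) {
    // The constructor advances to the first element satisfying the predicate.
    while (_position != _array->size() && !_predicate((*_array)[_position])) {
      ++_position;
    }
  }
  bool at_end() const { return _position == _array->size(); }
  E operator*() const { return (*_array)[_position]; }
  FilterIterator& operator++() {
    // operator++ repeats the same scan from the next position.
    do {
      ++_position;
    } while (_position != _array->size() && !_predicate((*_array)[_position]));
    return *this;
  }
};

struct IsEven {
  bool operator()(int v) const { return v % 2 == 0; }
};

int main() {
  std::vector<int> v;
  v.push_back(1); v.push_back(4); v.push_back(7); v.push_back(10);
  for (FilterIterator<int, IsEven> it(&v, IsEven()); !it.at_end(); ++it) {
    printf("%d ", *it);   // prints: 4 10
  }
  printf("\n");
  return 0;
}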
--- ./hotspot/src/share/vm/utilities/hashtable.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/utilities/hashtable.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -36,21 +36,22 @@
 #include "utilities/numberSeq.hpp"

-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-//  - HashtableEntrys are allocated in blocks to reduce the space overhead.
+// This hashtable is implemented as an open hash table with a fixed number of buckets.

-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
-  BasicHashtableEntry<F>* entry;
-
-  if (_free_list) {
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
+  BasicHashtableEntry<F>* entry = NULL;
+  if (_free_list != NULL) {
     entry = _free_list;
     _free_list = _free_list->next();
-  } else {
+  }
+  return entry;
+}
+
+// HashtableEntrys are allocated in blocks to reduce the space overhead.
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+  BasicHashtableEntry<F>* entry = new_entry_free_list();
+
+  if (entry == NULL) {
     if (_first_free_entry + _entry_size >= _end_block) {
       int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
       int len = _entry_size * block_size;
@@ -83,9 +84,9 @@
 // This is somewhat an arbitrary heuristic but if one bucket gets to
 // rehash_count which is currently 100, there's probably something wrong.

-template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
-  assert(table_size() != 0, "underflow");
-  if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
+template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
+  assert(this->table_size() != 0, "underflow");
+  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
     // Set a flag for the next safepoint, which should be at some guaranteed
     // safepoint interval.
     return true;
@@ -93,13 +94,13 @@
   return false;
 }

-template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

 // Create a new table and using alternate hash code, populate the new table
 // with the existing elements. This can be used to change the hash code
 // and could in the future change the size of the table.

-template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

   // Initialize the global seed for hashing.
_seed = AltHashing::compute_seed(); @@ -109,7 +110,7 @@ // Iterate through the table and create a new entry for the new table for (int i = 0; i < new_table->table_size(); ++i) { - for (HashtableEntry* p = bucket(i); p != NULL; ) { + for (HashtableEntry* p = this->bucket(i); p != NULL; ) { HashtableEntry* next = p->next(); T string = p->literal(); // Use alternate hashing algorithm on the symbol in the first table @@ -238,11 +239,11 @@ } } -template int Hashtable::literal_size(Symbol *symbol) { +template int RehashableHashtable::literal_size(Symbol *symbol) { return symbol->size() * HeapWordSize; } -template int Hashtable::literal_size(oop oop) { +template int RehashableHashtable::literal_size(oop oop) { // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true, // and the String.value array is shared by several Strings. However, starting from JDK8, // the String.value array is not shared anymore. @@ -255,12 +256,12 @@ // Note: if you create a new subclass of Hashtable, you will need to // add a new function Hashtable::literal_size(MyNewType lit) -template void Hashtable::dump_table(outputStream* st, const char *table_name) { +template void RehashableHashtable::dump_table(outputStream* st, const char *table_name) { NumberSeq summary; int literal_bytes = 0; for (int i = 0; i < this->table_size(); ++i) { int count = 0; - for (HashtableEntry* e = bucket(i); + for (HashtableEntry* e = this->bucket(i); e != NULL; e = e->next()) { count++; literal_bytes += literal_size(e->literal()); @@ -270,7 +271,7 @@ double num_buckets = summary.num(); double num_entries = summary.sum(); - int bucket_bytes = (int)num_buckets * sizeof(bucket(0)); + int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket); int entry_bytes = (int)num_entries * sizeof(HashtableEntry); int total_bytes = literal_bytes + bucket_bytes + entry_bytes; @@ -352,12 +353,20 @@ #endif // Explicitly instantiate these types +#if INCLUDE_ALL_GCS +template class Hashtable; +template class HashtableEntry; +template class BasicHashtable; +#endif template class Hashtable; +template class RehashableHashtable; +template class RehashableHashtable; template class Hashtable; template class Hashtable; template class Hashtable; #if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS) template class Hashtable; +template class RehashableHashtable; #endif // SOLARIS || CHECK_UNHANDLED_OOPS template class Hashtable; template class Hashtable; --- ./hotspot/src/share/vm/utilities/hashtable.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/hashtable.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -178,11 +178,6 @@ void verify_lookup_length(double load); #endif - enum { - rehash_count = 100, - rehash_multiple = 60 - }; - void initialize(int table_size, int entry_size, int number_of_entries); // Accessor @@ -194,12 +189,12 @@ // The following method is not MT-safe and must be done under lock. 
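The allocation fast path factored out above, and declared just below in hashtable.hpp as new_entry_free_list(), is a classic free-list scheme: freed entries are pushed onto a singly linked list and popped before any new block space is carved. A simplified, runnable sketch of the pattern, with plain C heap allocation standing in for HotSpot's block allocator and made-up type names:

#include <cstdio>
#include <cstdlib>

struct Entry {
  Entry* next;
  unsigned hash;
};

struct Table {
  Entry* free_list;

  Entry* new_entry_free_list() {
    Entry* e = NULL;
    if (free_list != NULL) {
      e = free_list;
      free_list = free_list->next;   // pop a recycled entry
    }
    return e;
  }

  Entry* new_entry(unsigned hash) {
    Entry* e = new_entry_free_list();
    if (e == NULL) {
      e = (Entry*)malloc(sizeof(Entry));  // stands in for block allocation
    }
    e->next = NULL;
    e->hash = hash;
    return e;
  }

  void free_entry(Entry* e) {        // unlink and push back for reuse
    e->next = free_list;
    free_list = e;
  }
};

int main() {
  Table t = { NULL };
  Entry* a = t.new_entry(17);
  t.free_entry(a);
  Entry* b = t.new_entry(42);        // reuses the same storage as 'a'
  printf("recycled: %s\n", (a == b) ? "yes" : "no");
  free(b);
  return 0;
}

Reuse keeps entry storage at a stable high-water mark instead of leaking removed entries; the trade-off is that blocks are never returned to the OS.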
BasicHashtableEntry** bucket_addr(int i) { return _buckets[i].entry_addr(); } + // Attempt to get an entry from the free list + BasicHashtableEntry* new_entry_free_list(); + // Table entry management BasicHashtableEntry* new_entry(unsigned int hashValue); - // Check that the table is unbalanced - bool check_rehash_table(int count); - // Used when moving the entry to another table // Clean up links, but do not add to free_list void unlink_entry(BasicHashtableEntry* entry) { @@ -277,8 +272,30 @@ return (HashtableEntry**)BasicHashtable::bucket_addr(i); } +}; + +template class RehashableHashtable : public Hashtable { + protected: + + enum { + rehash_count = 100, + rehash_multiple = 60 + }; + + // Check that the table is unbalanced + bool check_rehash_table(int count); + + public: + RehashableHashtable(int table_size, int entry_size) + : Hashtable(table_size, entry_size) { } + + RehashableHashtable(int table_size, int entry_size, + HashtableBucket* buckets, int number_of_entries) + : Hashtable(table_size, entry_size, buckets, number_of_entries) { } + + // Function to move these elements into the new table. - void move_to(Hashtable* new_table); + void move_to(RehashableHashtable* new_table); static bool use_alternate_hashcode() { return _seed != 0; } static juint seed() { return _seed; } @@ -292,7 +309,6 @@ static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;} static int literal_size(Klass *k) {Unimplemented(); return 0;} -public: void dump_table(outputStream* st, const char *table_name); private: --- ./hotspot/src/share/vm/utilities/hashtable.inline.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/hashtable.inline.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_UTILITIES_HASHTABLE_INLINE_HPP #include "memory/allocation.inline.hpp" +#include "runtime/orderAccess.inline.hpp" #include "utilities/hashtable.hpp" #include "utilities/dtrace.hpp" --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/linkedlist.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +#include "runtime/os.hpp" +#include "utilities/linkedlist.hpp" +#include "memory/allocation.hpp" +#include "memory/allocation.inline.hpp" + +class Integer : public StackObj { + private: + int _value; + public: + Integer(int i) : _value(i) { } + + int value() const { return _value; } + bool equals(const Integer& i) const { + return _value == i.value(); + } +}; + +int compare_Integer(const Integer& i1, const Integer& i2) { + return i1.value() - i2.value(); +} + +void check_list_values(const int* expected, const LinkedList* list) { + LinkedListNode* head = list->head(); + int index = 0; + while (head != NULL) { + assert(head->peek()->value() == expected[index], "Unexpected value"); + head = head->next(); + index ++; + } +} + +void Test_linked_list() { + LinkedListImpl ll; + + + // Test regular linked list + assert(ll.is_empty(), "Start with empty list"); + Integer one(1), two(2), three(3), four(4), five(5), six(6); + + ll.add(six); + assert(!ll.is_empty(), "Should not be empty"); + + Integer* i = ll.find(six); + assert(i != NULL, "Should find it"); + + i = ll.find(three); + assert(i == NULL, "Not in the list"); + + LinkedListNode* node = ll.find_node(six); + assert(node != NULL, "6 is in the list"); + + ll.insert_after(three, node); + ll.insert_before(one, node); + int expected[3] = {1, 6, 3}; + check_list_values(expected, &ll); + + ll.add(two); + ll.add(four); + ll.add(five); + + // Test sorted linked list + SortedLinkedList sl; + assert(sl.is_empty(), "Start with empty list"); + + size_t ll_size = ll.size(); + sl.move(&ll); + size_t sl_size = sl.size(); + + assert(ll_size == sl_size, "Should be the same size"); + assert(ll.is_empty(), "No more entires"); + + // sorted result + int sorted_result[] = {1, 2, 3, 4, 5, 6}; + check_list_values(sorted_result, &sl); + + node = sl.find_node(four); + assert(node != NULL, "4 is in the list"); + sl.remove_before(node); + sl.remove_after(node); + int remains[] = {1, 2, 4, 6}; + check_list_values(remains, &sl); +} +#endif // PRODUCT + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/linkedlist.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_UTILITIES_LINKED_LIST_HPP +#define SHARE_VM_UTILITIES_LINKED_LIST_HPP + +#include "memory/allocation.hpp" + +/* + * The implementation of a generic linked list, which uses various + * backing storages, such as C heap, arena and resource, etc. + */ + + +// An entry in a linked list. It should use the same backing storage +// as the linked list that contains this entry. +template class LinkedListNode : public ResourceObj { + private: + E _data; // embedded content + LinkedListNode* _next; // next entry + + protected: + LinkedListNode() : _next(NULL) { } + + public: + LinkedListNode(const E& e): _data(e), _next(NULL) { } + + inline void set_next(LinkedListNode* node) { _next = node; } + inline LinkedListNode * next() const { return _next; } + + E* data() { return &_data; } + const E* peek() const { return &_data; } +}; + +// A linked list interface. It does not specify +// any storage type it uses, so all methods involving +// memory allocation or deallocation are pure virtual +template class LinkedList : public ResourceObj { + protected: + LinkedListNode* _head; + + public: + LinkedList() : _head(NULL) { } + + inline void set_head(LinkedListNode* h) { _head = h; } + inline LinkedListNode* head() const { return _head; } + inline bool is_empty() const { return head() == NULL; } + + inline size_t size() const { + LinkedListNode* p; + size_t count = 0; + for (p = head(); p != NULL; count++, p = p->next()); + return count; + } + + // Move all entries from specified linked list to this one + virtual void move(LinkedList* list) = 0; + + // Add an entry to this linked list + virtual LinkedListNode* add(const E& e) = 0; + // Add all entries from specified linked list to this one, + virtual void add(LinkedListNode* node) = 0; + + // Add a linked list to this linked list + virtual bool add(const LinkedList* list) = 0; + + // Search entry in the linked list + virtual LinkedListNode* find_node(const E& e) = 0; + virtual E* find(const E& e) = 0; + + // Insert entry to the linked list + virtual LinkedListNode* insert_before(const E& e, LinkedListNode* ref) = 0; + virtual LinkedListNode* insert_after (const E& e, LinkedListNode* ref) = 0; + + // Remove entry from the linked list + virtual bool remove(const E& e) = 0; + virtual bool remove(LinkedListNode* node) = 0; + virtual bool remove_before(LinkedListNode* ref) = 0; + virtual bool remove_after(LinkedListNode* ref) = 0; + + LinkedListNode* unlink_head() { + LinkedListNode* h = this->head(); + if (h != NULL) { + this->set_head(h->next()); + } + return h; + } + + DEBUG_ONLY(virtual ResourceObj::allocation_type storage_type() = 0;) +}; + +// A linked list implementation. +// The linked list can be allocated in various type of memory: C heap, arena and resource area, etc. 
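Before the concrete LinkedListImpl that follows, it may help to see the contract the interface above fixes: elements are copied by value into their nodes, add() pushes at the head, and find() walks the chain calling the element type's equals(). A stand-alone sketch under those assumptions (simplified types, not the HotSpot declarations):

#include <cstdio>

struct Integer {
  int _value;
  explicit Integer(int v) : _value(v) {}
  int value() const { return _value; }
  bool equals(const Integer& o) const { return _value == o._value; }
};

template <class E> struct Node {
  E data;        // embedded copy of the element
  Node* next;
  Node(const E& e) : data(e), next(0) {}
};

template <class E> struct List {
  Node<E>* head;
  List() : head(0) {}
  ~List() { while (head) { Node<E>* n = head; head = head->next; delete n; } }

  Node<E>* add(const E& e) {         // head insertion, like LinkedListImpl::add
    Node<E>* n = new Node<E>(e);
    n->next = head;
    head = n;
    return n;
  }

  E* find(const E& e) {              // linear search via equals()
    for (Node<E>* p = head; p; p = p->next) {
      if (p->data.equals(e)) return &p->data;
    }
    return 0;
  }
};

int main() {
  List<Integer> ll;
  ll.add(Integer(6));
  ll.add(Integer(1));
  printf("found 6: %s\n", ll.find(Integer(6)) ? "yes" : "no");  // yes
  printf("found 3: %s\n", ll.find(Integer(3)) ? "yes" : "no");  // no
  return 0;
}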
+template + class LinkedListImpl : public LinkedList { + protected: + Arena* _arena; + public: + LinkedListImpl() : _arena(NULL) { } + LinkedListImpl(Arena* a) : _arena(a) { } + + virtual ~LinkedListImpl() { + clear(); + } + + virtual void clear() { + LinkedListNode* p = this->head(); + this->set_head(NULL); + while (p != NULL) { + LinkedListNode* to_delete = p; + p = p->next(); + delete_node(to_delete); + } + } + + // Add an entry to the linked list + virtual LinkedListNode* add(const E& e) { + LinkedListNode* node = this->new_node(e); + if (node != NULL) { + this->add(node); + } + + return node; + } + + virtual void add(LinkedListNode* node) { + assert(node != NULL, "NULL pointer"); + node->set_next(this->head()); + this->set_head(node); + } + + // Move a linked list to this linked list, both have to be allocated on the same + // storage type. + virtual void move(LinkedList* list) { + assert(list->storage_type() == this->storage_type(), "Different storage type"); + LinkedListNode* node = this->head(); + while (node != NULL && node->next() != NULL) { + node = node->next(); + } + if (node == NULL) { + this->set_head(list->head()); + } else { + node->set_next(list->head()); + } + // All entries are moved + list->set_head(NULL); + } + + virtual bool add(const LinkedList* list) { + LinkedListNode* node = list->head(); + while (node != NULL) { + if (this->add(*node->peek()) == NULL) { + return false; + } + node = node->next(); + } + return true; + } + + + virtual LinkedListNode* find_node(const E& e) { + LinkedListNode* p = this->head(); + while (p != NULL && !p->peek()->equals(e)) { + p = p->next(); + } + return p; + } + + E* find(const E& e) { + LinkedListNode* node = find_node(e); + return (node == NULL) ? NULL : node->data(); + } + + + // Add an entry in front of the reference entry + LinkedListNode* insert_before(const E& e, LinkedListNode* ref_node) { + LinkedListNode* node = this->new_node(e); + if (node == NULL) return NULL; + if (ref_node == this->head()) { + node->set_next(ref_node); + this->set_head(node); + } else { + LinkedListNode* p = this->head(); + while (p != NULL && p->next() != ref_node) { + p = p->next(); + } + assert(p != NULL, "ref_node not in the list"); + node->set_next(ref_node); + p->set_next(node); + } + return node; + } + + // Add an entry behind the reference entry + LinkedListNode* insert_after(const E& e, LinkedListNode* ref_node) { + LinkedListNode* node = this->new_node(e); + if (node == NULL) return NULL; + node->set_next(ref_node->next()); + ref_node->set_next(node); + return node; + } + + // Remove an entry from the linked list. 
+ // Return true if the entry is successfully removed + virtual bool remove(const E& e) { + LinkedListNode* tmp = this->head(); + LinkedListNode* prev = NULL; + + while (tmp != NULL) { + if (tmp->peek()->equals(e)) { + return remove_after(prev); + } + prev = tmp; + tmp = tmp->next(); + } + return false; + } + + // Remove the node after the reference entry + virtual bool remove_after(LinkedListNode* prev) { + LinkedListNode* to_delete; + if (prev == NULL) { + to_delete = this->unlink_head(); + } else { + to_delete = prev->next(); + if (to_delete != NULL) { + prev->set_next(to_delete->next()); + } + } + + if (to_delete != NULL) { + delete_node(to_delete); + return true; + } + return false; + } + + virtual bool remove(LinkedListNode* node) { + LinkedListNode* p = this->head(); + while (p != NULL && p->next() != node) { + p = p->next(); + } + if (p != NULL) { + p->set_next(node->next()); + delete_node(node); + return true; + } else { + return false; + } + } + + virtual bool remove_before(LinkedListNode* ref) { + assert(ref != NULL, "NULL pointer"); + LinkedListNode* p = this->head(); + LinkedListNode* to_delete = NULL; // to be deleted + LinkedListNode* prev = NULL; // node before the node to be deleted + while (p != NULL && p != ref) { + prev = to_delete; + to_delete = p; + p = p->next(); + } + if (p == NULL || to_delete == NULL) return false; + assert(to_delete->next() == ref, "Wrong node to delete"); + assert(prev == NULL || prev->next() == to_delete, + "Sanity check"); + if (prev == NULL) { + assert(to_delete == this->head(), "Must be head"); + this->set_head(to_delete->next()); + } else { + prev->set_next(to_delete->next()); + } + delete_node(to_delete); + return true; + } + + DEBUG_ONLY(ResourceObj::allocation_type storage_type() { return T; }) + protected: + // Create new linked list node object in specified storage + LinkedListNode* new_node(const E& e) const { + switch(T) { + case ResourceObj::ARENA: { + assert(_arena != NULL, "Arena not set"); + return new(_arena) LinkedListNode(e); + } + case ResourceObj::RESOURCE_AREA: + case ResourceObj::C_HEAP: { + if (alloc_failmode == AllocFailStrategy::RETURN_NULL) { + return new(std::nothrow, T, F) LinkedListNode(e); + } else { + return new(T, F) LinkedListNode(e); + } + } + default: + ShouldNotReachHere(); + } + return NULL; + } + + // Delete linked list node object + void delete_node(LinkedListNode* node) { + if (T == ResourceObj::C_HEAP) { + delete node; + } + } +}; + +// Sorted linked list. 
The linked list maintains sorting order specified by the comparison +// function +template + class SortedLinkedList : public LinkedListImpl { + public: + SortedLinkedList() { } + SortedLinkedList(Arena* a) : LinkedListImpl(a) { } + + virtual LinkedListNode* add(const E& e) { + return LinkedListImpl::add(e); + } + + virtual void move(LinkedList* list) { + assert(list->storage_type() == this->storage_type(), "Different storage type"); + LinkedListNode* node; + while ((node = list->unlink_head()) != NULL) { + this->add(node); + } + assert(list->is_empty(), "All entries are moved"); + } + + virtual void add(LinkedListNode* node) { + assert(node != NULL, "NULL pointer"); + LinkedListNode* tmp = this->head(); + LinkedListNode* prev = NULL; + + int cmp_val; + while (tmp != NULL) { + cmp_val = FUNC(*tmp->peek(), *node->peek()); + if (cmp_val >= 0) { + break; + } + prev = tmp; + tmp = tmp->next(); + } + + if (prev != NULL) { + node->set_next(prev->next()); + prev->set_next(node); + } else { + node->set_next(this->head()); + this->set_head(node); + } + } + + virtual bool add(const LinkedList* list) { + return LinkedListImpl::add(list); + } + + virtual LinkedListNode* find_node(const E& e) { + LinkedListNode* p = this->head(); + + while (p != NULL) { + int comp_val = FUNC(*p->peek(), e); + if (comp_val == 0) { + return p; + } else if (comp_val > 0) { + return NULL; + } + p = p->next(); + } + return NULL; + } +}; + +// Iterates all entries in the list +template class LinkedListIterator : public StackObj { + private: + LinkedListNode* _p; + bool _is_empty; + public: + LinkedListIterator(LinkedListNode* head) : _p(head) { + _is_empty = (head == NULL); + } + + bool is_empty() const { return _is_empty; } + + const E* next() { + if (_p == NULL) return NULL; + const E* e = _p->peek(); + _p = _p->next(); + return e; + } +}; + +#endif --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/nativeCallStack.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/nativeCallStack.hpp" + +const NativeCallStack NativeCallStack::EMPTY_STACK(0, false); + +NativeCallStack::NativeCallStack(int toSkip, bool fillStack) : + _hash_value(0) { + +#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED + fillStack = false; +#endif + + if (fillStack) { + os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip); + } else { + for (int index = 0; index < NMT_TrackingStackDepth; index ++) { + _stack[index] = NULL; + } + } +} + +NativeCallStack::NativeCallStack(address* pc, int frameCount) { + int frameToCopy = (frameCount < NMT_TrackingStackDepth) ? + frameCount : NMT_TrackingStackDepth; + int index; + for (index = 0; index < frameToCopy; index ++) { + _stack[index] = pc[index]; + } + for (; index < NMT_TrackingStackDepth; index ++) { + _stack[index] = NULL; + } +} + +// number of stack frames captured +int NativeCallStack::frames() const { + int index; + for (index = 0; index < NMT_TrackingStackDepth; index ++) { + if (_stack[index] == NULL) { + break; + } + } + return index; +} + +// Hash code. Any better algorithm? +int NativeCallStack::hash() const { + long hash_val = _hash_value; + if (hash_val == 0) { + long pc; + int index; + for (index = 0; index < NMT_TrackingStackDepth; index ++) { + pc = (long)_stack[index]; + if (pc == 0) break; + hash_val += pc; + } + + NativeCallStack* p = const_cast(this); + p->_hash_value = (int)(hash_val & 0xFFFFFFFF); + } + return _hash_value; +} + +void NativeCallStack::print_on(outputStream* out) const { + print_on(out, 0); +} + +// Decode and print this call path +void NativeCallStack::print_on(outputStream* out, int indent) const { + address pc; + char buf[1024]; + int offset; + if (is_empty()) { + for (int index = 0; index < indent; index ++) out->print(" "); +#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED + out->print("[BOOTSTRAP]"); +#else + out->print("[No stack]"); +#endif + } else { + for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) { + pc = get_frame(frame); + if (pc == NULL) break; + // Print indent + for (int index = 0; index < indent; index ++) out->print(" "); + if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) { + out->print_cr("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset); + } else { + out->print_cr("[" PTR_FORMAT "]", p2i(pc)); + } + } + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/nativeCallStack.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP +#define SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP + +#include "memory/allocation.hpp" +#include "services/nmtCommon.hpp" +#include "utilities/ostream.hpp" + +/* + * This class represents a native call path (does not include Java frame) + * + * This class is developed in the context of native memory tracking, it can + * be an useful tool for debugging purpose. + * + * For example, following code should print out native call path: + * + * .... + * NativeCallStack here; + * here.print_on(tty); + * .... + * + * However, there are a couple of restrictions on this class. If the restrictions are + * not strictly followed, it may break native memory tracking badly. + * + * 1. Number of stack frames to capture, is defined by native memory tracking. + * This number has impacts on how much memory to be used by native + * memory tracking. + * 2. The class is strict stack object, no heap or virtual memory can be allocated + * from it. + */ +class NativeCallStack : public StackObj { + public: + static const NativeCallStack EMPTY_STACK; + + private: + address _stack[NMT_TrackingStackDepth]; + int _hash_value; + + public: + NativeCallStack(int toSkip = 0, bool fillStack = false); + NativeCallStack(address* pc, int frameCount); + + + // if it is an empty stack + inline bool is_empty() const { + return _stack[0] == NULL; + } + + // number of stack frames captured + int frames() const; + + inline int compare(const NativeCallStack& other) const { + return memcmp(_stack, other._stack, sizeof(_stack)); + } + + inline bool equals(const NativeCallStack& other) const { + // compare hash values + if (hash() != other.hash()) return false; + // compare each frame + return compare(other) == 0; + } + + inline address get_frame(int index) const { + assert(index >= 0 && index < NMT_TrackingStackDepth, "Index out of bound"); + return _stack[index]; + } + + // Hash code. Any better algorithm? 
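The hash() declared just below is cached lazily and is just the sum of the frame addresses, so distinct stacks can collide (any permutation of the same frames sums identically); equals() therefore uses the hash only as a cheap reject and falls back to memcmp for the exact comparison. A runnable sketch of that protocol with a fixed depth and plain pointer types (not the HotSpot declarations):

#include <stdint.h>
#include <cstdio>
#include <cstring>

static const int kDepth = 4;   // stands in for NMT_TrackingStackDepth

struct Stack {
  void* frames[kDepth];
  mutable int cached_hash;     // 0 means "not computed yet"

  int hash() const {
    if (cached_hash == 0) {
      long h = 0;
      for (int i = 0; i < kDepth && frames[i] != 0; i++) {
        h += (long)(uintptr_t)frames[i];  // simple additive hash over the PCs
      }
      cached_hash = (int)(h & 0xFFFFFFFF);
    }
    return cached_hash;
  }

  bool equals(const Stack& other) const {
    if (hash() != other.hash()) return false;                  // cheap reject
    return memcmp(frames, other.frames, sizeof(frames)) == 0;  // exact check
  }
};

int main() {
  int a, b;
  Stack s1 = { { &a, &b, 0, 0 }, 0 };
  Stack s2 = { { &a, &b, 0, 0 }, 0 };
  Stack s3 = { { &b, &a, 0, 0 }, 0 };       // same frames, different order
  printf("s1 == s2: %d\n", s1.equals(s2));  // 1
  printf("s1 == s3: %d\n", s1.equals(s3));  // 0: hashes match, memcmp differs
  return 0;
}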
+ int hash() const; + + void print_on(outputStream* out) const; + void print_on(outputStream* out, int indent) const; +}; + +#endif --- ./hotspot/src/share/vm/utilities/ostream.cpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/ostream.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "compiler/compileLog.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" #include "utilities/defaultStream.hpp" @@ -240,6 +241,14 @@ return; } +void outputStream::gclog_stamp(const GCId& gc_id) { + date_stamp(PrintGCDateStamps); + stamp(PrintGCTimeStamps); + if (PrintGCID) { + print("#%u: ", gc_id.id()); + } +} + outputStream& outputStream::indent() { while (_position < _indentation) sp(); return *this; @@ -356,6 +365,7 @@ xmlStream* xtty; outputStream* tty; outputStream* gclog_or_tty; +CDS_ONLY(fileStream* classlist_file;) // Only dump the classes that can be stored into the CDS archive extern Mutex* tty_lock; #define EXTRACHARLEN 32 @@ -470,7 +480,8 @@ return buf; } -// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name +// log_name comes from -XX:LogFile=log_name, -Xloggc:log_name or +// -XX:DumpLoadedClassList= // in log_name, %p => pid1234 and // %t => YYYY-MM-DD_HH-MM-SS static const char* make_log_name(const char* log_name, const char* force_directory) { @@ -1191,6 +1202,16 @@ gclog_or_tty = gclog; } +#if INCLUDE_CDS + // For -XX:DumpLoadedClassList= option + if (DumpLoadedClassList != NULL) { + const char* list_name = make_log_name(DumpLoadedClassList, NULL); + classlist_file = new(ResourceObj::C_HEAP, mtInternal) + fileStream(list_name); + FREE_C_HEAP_ARRAY(char, list_name, mtInternal); + } +#endif + // If we haven't lazily initialized the logfile yet, do it now, // to avoid the possibility of lazy initialization during a VM // crash, which can affect the stability of the fatal error handler. @@ -1203,6 +1224,11 @@ static bool ostream_exit_called = false; if (ostream_exit_called) return; ostream_exit_called = true; +#if INCLUDE_CDS + if (classlist_file != NULL) { + delete classlist_file; + } +#endif if (gclog_or_tty != tty) { delete gclog_or_tty; } --- ./hotspot/src/share/vm/utilities/ostream.hpp Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/src/share/vm/utilities/ostream.hpp Wed Feb 04 12:14:39 2015 -0800 @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "runtime/timer.hpp" +class GCId; DEBUG_ONLY(class ResourceMark;) // Output streams for printing @@ -107,6 +108,7 @@ void date_stamp(bool guard) { date_stamp(guard, "", ": "); } + void gclog_stamp(const GCId& gc_id); // portable printing of 64 bit integers void print_jlong(jlong value); @@ -212,6 +214,8 @@ void flush(); }; +CDS_ONLY(extern fileStream* classlist_file;) + // unlike fileStream, fdStream does unbuffered I/O by calling // open() and write() directly. It is async-safe, but output // from multiple thread may be mixed together. Used by fatal --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/src/share/vm/utilities/stringUtils.cpp Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/stringUtils.hpp"
+
+int StringUtils::replace_no_expand(char* string, const char* from, const char* to) {
+  int replace_count = 0;
+  size_t from_len = strlen(from);
+  size_t to_len = strlen(to);
+  assert(from_len >= to_len, "must not expand input");
+
+  for (char* dst = string; *dst && (dst = strstr(dst, from)) != NULL;) {
+    char* left_over = dst + from_len;
+    memmove(dst, to, to_len);                       // does not copy trailing 0 of <to>
+    dst += to_len;                                  // skip over the replacement.
+    memmove(dst, left_over, strlen(left_over) + 1); // copies the trailing 0 of <left_over>
+    ++ replace_count;
+  }
+
+  return replace_count;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/src/share/vm/utilities/stringUtils.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_STRINGUTILS_HPP
+#define SHARE_VM_UTILITIES_STRINGUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+class StringUtils : AllStatic {
+public:
+  // Replace the substring <from> with another string <to>. <to> must be
+  // no longer than <from>. The input string is modified in-place.
+  //
+  // Replacement is done in a single pass left-to-right. So replace_no_expand("aaa", "aa", "a")
+  // will result in "aa", not "a".
+  //
+  // Returns the count of substrings that have been replaced.
+  static int replace_no_expand(char* string, const char* from, const char* to);
+};
+
+#endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP
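A quick stand-alone check of the documented contract: replacement is in place, never expands the buffer, and scans left-to-right, resuming after each replacement. The function body is copied from stringUtils.cpp above with the assert dropped; the inputs are made-up examples:

#include <cstdio>
#include <cstring>

static int replace_no_expand(char* string, const char* from, const char* to) {
  int replace_count = 0;
  size_t from_len = strlen(from);
  size_t to_len = strlen(to);

  for (char* dst = string; *dst && (dst = strstr(dst, from)) != NULL;) {
    char* left_over = dst + from_len;
    memmove(dst, to, to_len);
    dst += to_len;
    memmove(dst, left_over, strlen(left_over) + 1);
    ++replace_count;
  }
  return replace_count;
}

int main() {
  char buf1[] = "hello world hello";
  printf("%d -> '%s'\n", replace_no_expand(buf1, "hello", "HI"), buf1);
  // 2 -> 'HI world HI'

  char buf2[] = "aaa";
  printf("%d -> '%s'\n", replace_no_expand(buf2, "aa", "a"), buf2);
  // 1 -> 'aa': the scan resumes after the replacement, as documented above
  return 0;
}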
--- ./hotspot/src/share/vm/utilities/taskqueue.hpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/utilities/taskqueue.hpp Wed Feb 04 12:14:39 2015 -0800
@@ -28,40 +28,8 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/stack.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif

 // Simple TaskQueue stats that are collected by default in debug builds.
--- ./hotspot/src/share/vm/utilities/vmError.cpp Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/src/share/vm/utilities/vmError.cpp Wed Feb 04 12:14:39 2015 -0800
@@ -31,7 +31,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
@@ -773,6 +773,11 @@
     st->cr();
   }

+  STEP(228, "(Native Memory Tracking)" )
+     if (_verbose) {
+       MemTracker::error_report(st);
+     }
+
   STEP(230, "" )

   if (_verbose) {
@@ -897,9 +902,6 @@
   static bool log_done = false;         // done saving error log
   static bool transmit_report_done = false; // done error reporting

-  // disble NMT to avoid further exception
-  MemTracker::shutdown(MemTracker::NMT_error_reporting);
-
   if (SuppressFatalErrorMessage) {
     os::abort();
   }
--- ./hotspot/test/Makefile Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/test/Makefile Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# # This code is free software; you can redistribute it and/or modify it @@ -23,14 +23,36 @@ # # -# Makefile to run various jdk tests +# Makefile to run various hotspot tests # GETMIXEDPATH=echo -# Get OS/ARCH specifics -OSNAME = $(shell uname -s) -ifeq ($(OSNAME), SunOS) +# Utilities used +AWK = awk +CAT = cat +CD = cd +CHMOD = chmod +CP = cp +CUT = cut +DIRNAME = dirname +ECHO = echo +EGREP = egrep +EXPAND = expand +FIND = find +MKDIR = mkdir +PWD = pwd +SED = sed +SORT = sort +TEE = tee +UNAME = uname +UNIQ = uniq +WC = wc +ZIP = zip + +# Get OS name from uname (Cygwin inexplicably adds _NT-5.1) +UNAME_S := $(shell $(UNAME) -s | $(CUT) -f1 -d_) +ifeq ($(UNAME_S), SunOS) PLATFORM = solaris SLASH_JAVA = /java ARCH = $(shell uname -p) @@ -38,7 +60,7 @@ ARCH=i586 endif endif -ifeq ($(OSNAME), Linux) +ifeq ($(UNAME_S), Linux) PLATFORM = linux SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -46,7 +68,7 @@ ARCH = i586 endif endif -ifeq ($(OSNAME), Darwin) +ifeq ($(UNAME_S), Darwin) PLATFORM = bsd SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -54,7 +76,7 @@ ARCH = i586 endif endif -ifeq ($(findstring BSD,$(OSNAME)), BSD) +ifeq ($(findstring BSD,$(UNAME_S)), BSD) PLATFORM = bsd SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -63,12 +85,12 @@ endif endif ifeq ($(PLATFORM),) - # detect wether we're running in MKS or cygwin - ifeq ($(OSNAME), Windows_NT) # MKS + # detect whether we're running in MKS or cygwin + ifeq ($(UNAME_S), Windows_NT) # MKS GETMIXEDPATH=dosname -s endif - ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN) - GETMIXEDPATH=cygpath -m -s + ifeq ($(findstring CYGWIN,$(UNAME_S)), CYGWIN) + GETMIXEDPATH=cygpath -m endif PLATFORM = windows SLASH_JAVA = J: @@ -92,13 +114,6 @@ SLASH_JAVA = $(ALT_SLASH_JAVA) endif -# Utilities used -CD = cd -CP = cp -ECHO = echo -MKDIR = mkdir -ZIP = zip - # Root of this test area (important to use full paths in some places) TEST_ROOT := $(shell pwd) @@ -136,17 +151,83 @@ endif # How to create the test bundle (pass or fail, we want to create this) -BUNDLE_UP = ( $(MKDIR) -p `dirname $(ARCHIVE_BUNDLE)` \ - && $(CD) $(ABS_TEST_OUTPUT_DIR) \ - && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . ) -BUNDLE_UP_FAILED = ( exitCode=$$? && $(BUNDLE_UP) && exit $${exitCode} ) +# Follow command with ";$(BUNDLE_UP_AND_EXIT)", so it always gets executed. +ZIP_UP_RESULTS = ( $(MKDIR) -p `$(DIRNAME) $(ARCHIVE_BUNDLE)` \ + && $(CD) $(ABS_TEST_OUTPUT_DIR) \ + && $(CHMOD) -R a+r . \ + && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . ) + +# important results files +SUMMARY_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport/text/summary.txt") +STATS_TXT_NAME = Stats.txt +STATS_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/$(STATS_TXT_NAME)") +RUNLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/runlist.txt") +PASSLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/passlist.txt") +FAILLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/faillist.txt") +EXITCODE = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/exitcode.txt") + +TESTEXIT = \ + if [ ! -s $(EXITCODE) ] ; then \ + $(ECHO) "ERROR: EXITCODE file not filled in."; \ + $(ECHO) "1" > $(EXITCODE); \ + fi ; \ + testExitCode=`$(CAT) $(EXITCODE)`; \ + $(ECHO) "EXIT CODE: $${testExitCode}"; \ + exit $${testExitCode} + +BUNDLE_UP_AND_EXIT = \ +( \ + jtregExitCode=$$? 
&& \ + _summary="$(SUMMARY_TXT)"; \ + $(RM) -f $(STATS_TXT) $(RUNLIST) $(PASSLIST) $(FAILLIST) $(EXITCODE); \ + $(ECHO) "$${jtregExitCode}" > $(EXITCODE); \ + if [ -r "$${_summary}" ] ; then \ + $(ECHO) "Summary: $(UNIQUE_DIR)" > $(STATS_TXT); \ + $(EXPAND) $${_summary} | $(EGREP) -v ' Not run\.' > $(RUNLIST); \ + $(EGREP) ' Passed\.' $(RUNLIST) \ + | $(EGREP) -v ' Error\.' \ + | $(EGREP) -v ' Failed\.' > $(PASSLIST); \ + ( $(EGREP) ' Failed\.' $(RUNLIST); \ + $(EGREP) ' Error\.' $(RUNLIST); \ + $(EGREP) -v ' Passed\.' $(RUNLIST) ) \ + | $(SORT) | $(UNIQ) > $(FAILLIST); \ + if [ $${jtregExitCode} != 0 -o -s $(FAILLIST) ] ; then \ + $(EXPAND) $(FAILLIST) \ + | $(CUT) -d' ' -f1 \ + | $(SED) -e 's@^@FAILED: @' >> $(STATS_TXT); \ + if [ $${jtregExitCode} = 0 ] ; then \ + jtregExitCode=1; \ + fi; \ + fi; \ + runc="`$(CAT) $(RUNLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + passc="`$(CAT) $(PASSLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + failc="`$(CAT) $(FAILLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + exclc="FIXME CODETOOLS-7900176"; \ + $(ECHO) "TEST STATS: name=$(UNIQUE_DIR) run=$${runc} pass=$${passc} fail=$${failc}" \ + >> $(STATS_TXT); \ + else \ + $(ECHO) "Missing file: $${_summary}" >> $(STATS_TXT); \ + fi; \ + if [ -f $(STATS_TXT) ] ; then \ + $(CAT) $(STATS_TXT); \ + fi; \ + $(ZIP_UP_RESULTS) ; \ + $(TESTEXIT) \ +) ################################################################ # Default make rule (runs jtreg_tests) -all: jtreg_tests +all: hotspot_all @$(ECHO) "Testing completed successfully" +# Support "hotspot_" prefixed test make targets (too) +# The hotspot_% targets are used by the top level Makefile +# Unless explicitly defined below, hotspot_ is interpreted as a jtreg test group name +hotspot_%: + $(ECHO) "Running tests: $@" + $(MAKE) -j 1 TEST_SELECTION=":$@" UNIQUE_DIR=$@ jtreg_tests; + # Prep for output prep: clean @$(MKDIR) -p $(ABS_TEST_OUTPUT_DIR) @@ -163,20 +244,37 @@ # Expect JT_HOME to be set for jtreg tests. 
(home for jtreg) ifndef JT_HOME - JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg -endif -ifdef JPRT_JTREG_HOME - JT_HOME = $(JPRT_JTREG_HOME) + JT_HOME = $(SLASH_JAVA)/re/jtreg/4.1/promoted/latest/binaries/jtreg + ifdef JPRT_JTREG_HOME + JT_HOME = $(JPRT_JTREG_HOME) + endif endif -# Expect JPRT to set TESTDIRS to the jtreg test dirs -JTREG_TESTDIRS = demo/jvmti/gctest demo/jvmti/hprof +# When called from JPRT the TESTDIRS variable is set to the jtreg tests to run ifdef TESTDIRS - JTREG_TESTDIRS = $(TESTDIRS) + TEST_SELECTION = $(TESTDIRS) endif -# Default JTREG to run (win32 script works for everybody) -JTREG = $(JT_HOME)/win32/bin/jtreg +ifdef CONCURRENCY + EXTRA_JTREG_OPTIONS += -concurrency:$(CONCURRENCY) +endif + +# Default JTREG to run +JTREG = $(JT_HOME)/bin/jtreg + +# Only run automatic tests +JTREG_BASIC_OPTIONS += -a +# Report details on all failed or error tests, times too +JTREG_BASIC_OPTIONS += -v:fail,error,time +# Retain all files for failing tests +JTREG_BASIC_OPTIONS += -retain:fail,error +# Ignore tests are not run and completely silent about it +JTREG_IGNORE_OPTION = -ignore:quiet +JTREG_BASIC_OPTIONS += $(JTREG_IGNORE_OPTION) +# Add any extra options +JTREG_BASIC_OPTIONS += $(EXTRA_JTREG_OPTIONS) +# Set other vm and test options +JTREG_TEST_OPTIONS = $(JAVA_ARGS:%=-javaoptions:%) $(JAVA_OPTIONS:%=-vmoption:%) $(JAVA_VM_ARGS:%=-vmoption:%) # Option to tell jtreg to not run tests marked with "ignore" ifeq ($(PLATFORM), windows) @@ -184,20 +282,26 @@ else JTREG_KEY_OPTION = -k:\!ignore endif +JTREG_BASIC_OPTIONS += $(JTREG_KEY_OPTION) + +# Make sure jtreg exists +$(JTREG): $(JT_HOME) -#EXTRA_JTREG_OPTIONS = - -jtreg_tests: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) - $(JTREG) -a -v:fail,error \ - $(JTREG_KEY_OPTION) \ - $(EXTRA_JTREG_OPTIONS) \ - -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \ - -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \ - -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ - $(JAVA_OPTIONS:%=-vmoption:%) \ - $(JTREG_TESTDIRS) \ - || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) +jtreg_tests: prep $(PRODUCT_HOME) $(JTREG) + ( \ + ( JT_HOME=$(shell $(GETMIXEDPATH) "$(JT_HOME)"); \ + export JT_HOME; \ + $(shell $(GETMIXEDPATH) "$(JTREG)") \ + $(JTREG_BASIC_OPTIONS) \ + -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport") \ + -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTwork") \ + -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ + $(JTREG_EXCLUSIONS) \ + $(JTREG_TEST_OPTIONS) \ + $(TEST_SELECTION) \ + ) ; \ + $(BUNDLE_UP_AND_EXIT) \ + ) 2>&1 | $(TEE) $(ABS_TEST_OUTPUT_DIR)/output.txt ; $(TESTEXIT) PHONY_LIST += jtreg_tests @@ -205,7 +309,7 @@ # clienttest (make sure various basic java client options work) -clienttest: prep $(PRODUCT_HOME) +hotspot_clienttest clienttest: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X @@ -213,73 +317,38 @@ $(RM) $(PRODUCT_HOME)/jre/bin/client/classes.jsa $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -Xshare:dump -PHONY_LIST += clienttest +PHONY_LIST += hotspot_clienttest clienttest + +################################################################ + +# minimaltest (make sure various basic java minimal options work) + +hotspot_minimaltest minimaltest: prep $(PRODUCT_HOME) + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X + +PHONY_LIST += hotspot_minimaltest 
minimaltest ################################################################ # servertest (make sure various basic java server options work) -servertest: prep $(PRODUCT_HOME) +hotspot_servertest servertest: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X -PHONY_LIST += servertest +PHONY_LIST += hotspot_servertest servertest ################################################################ # internalvmtests (run internal unit tests inside the VM) -internalvmtests: prep $(PRODUCT_HOME) +hotspot_internalvmtests internalvmtests: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -XX:+ExecuteInternalVMTests -version -PHONY_LIST += internalvmtests - -################################################################ - -# wbapitest (make sure the whitebox testing api classes work - -wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) - $(JTREG) -a -v:fail,error \ - $(JTREG_KEY_OPTION) \ - $(EXTRA_JTREG_OPTIONS) \ - -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \ - -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \ - -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ - $(JAVA_OPTIONS:%=-vmoption:%) \ - $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity \ - || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) - -PHONY_LIST += wbapitest - -################################################################ - -# packtest - -# Expect JPRT to set JPRT_PACKTEST_HOME. -PACKTEST_HOME = /net/jprt-web.sfbay.sun.com/jprt/allproducts/packtest -ifdef JPRT_PACKTEST_HOME - PACKTEST_HOME = $(JPRT_PACKTEST_HOME) -endif - -#EXTRA_PACKTEST_OPTIONS = - -packtest: prep $(PACKTEST_HOME)/ptest $(PRODUCT_HOME) - ( $(CD) $(PACKTEST_HOME) && \ - $(PACKTEST_HOME)/ptest \ - -t "$(PRODUCT_HOME)" \ - $(PACKTEST_STRESS_OPTION) \ - $(EXTRA_PACKTEST_OPTIONS) \ - -W $(ABS_TEST_OUTPUT_DIR) \ - $(JAVA_OPTIONS:%=-J %) \ - ) || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) - -packtest_stress: PACKTEST_STRESS_OPTION=-s -packtest_stress: packtest - -PHONY_LIST += packtest packtest_stress +PHONY_LIST += hotspot_internalvmtests internalvmtests ################################################################ @@ -287,4 +356,3 @@ .PHONY: all clean prep $(PHONY_LIST) ################################################################ - --- ./hotspot/test/TEST.ROOT Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/TEST.ROOT Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ # It also contains test-suite configuration information. 
# The list of keywords supported in this test suite -keys=cte_test jcmd nmt regression gc +keys=cte_test jcmd nmt regression gc stress groups=TEST.groups [closed/TEST.groups] +requires.properties=sun.arch.data.model --- ./hotspot/test/TEST.groups Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/TEST.groups Wed Feb 04 12:14:39 2015 -0800 @@ -73,15 +73,24 @@ runtime/jsig/Test8017498.sh \ runtime/Metaspace/FragmentMetaspace.java \ runtime/NMT/BaselineWithParameter.java \ + runtime/NMT/JcmdBaselineDetail.java \ + runtime/NMT/JcmdDetailDiff.java \ + runtime/NMT/JcmdScaleDetail.java \ runtime/NMT/JcmdScale.java \ runtime/NMT/JcmdWithNMTDisabled.java \ + runtime/NMT/MallocRoundingReportTest.java \ + runtime/NMT/MallocSiteHashOverflow.java \ + runtime/NMT/MallocStressTest.java \ runtime/NMT/MallocTestType.java \ + runtime/NMT/MallocTrackingVerify.java \ runtime/NMT/ReleaseCommittedMemory.java \ + runtime/NMT/ReleaseNoCommit.java \ runtime/NMT/ShutdownTwice.java \ runtime/NMT/SummaryAfterShutdown.java \ runtime/NMT/SummarySanityCheck.java \ runtime/NMT/ThreadedMallocTestType.java \ runtime/NMT/ThreadedVirtualAllocTestType.java \ + runtime/NMT/VirtualAllocCommitUncommitRecommit.java \ runtime/NMT/VirtualAllocTestType.java \ runtime/RedefineObject/TestRedefineObject.java \ runtime/Thread/TestThreadDumpMonitorContention.java \ @@ -115,6 +124,27 @@ -:needs_jdk +# When called from top level the test suites use the hotspot_ prefix +hotspot_wbapitest = \ + sanity/ + +hotspot_compiler = \ + sanity/ExecuteInternalVMTests.java + +hotspot_gc = \ + sanity/ExecuteInternalVMTests.java + +hotspot_runtime = \ + sanity/ExecuteInternalVMTests.java + +hotspot_serviceability = \ + sanity/ExecuteInternalVMTests.java + +hotspot_all = \ + :hotspot_compiler \ + :hotspot_gc \ + :hotspot_runtime \ + :hotspot_serviceability # Tests that require compact3 API's # needs_compact3 = \ @@ -129,10 +159,19 @@ gc/6581734/Test6581734.java \ gc/7072527/TestFullGCCount.java \ gc/g1/TestHumongousAllocInitialMark.java \ + gc/g1/TestHumongousShrinkHeap.java \ gc/arguments/TestG1HeapRegionSize.java \ gc/metaspace/TestMetaspaceMemoryPool.java \ gc/arguments/TestDynMinHeapFreeRatio.java \ gc/arguments/TestDynMaxHeapFreeRatio.java \ + gc/g1/TestShrinkAuxiliaryData00.java \ + gc/g1/TestShrinkAuxiliaryData05.java \ + gc/g1/TestShrinkAuxiliaryData10.java \ + gc/g1/TestShrinkAuxiliaryData15.java \ + gc/g1/TestShrinkAuxiliaryData20.java \ + gc/g1/TestShrinkAuxiliaryData25.java \ + gc/g1/TestShrinkAuxiliaryData30.java \ + gc/survivorAlignment \ runtime/InternalApi/ThreadCpuTimesDeadlock.java \ serviceability/threads/TestFalseDeadLock.java \ serviceability/jvmti/GetObjectSizeOverflow.java \ @@ -169,6 +208,8 @@ # needs_full_vm_compact1 = \ runtime/NMT \ + gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \ + gc/class_unloading/TestG1ClassUnloadingHWM.java \ gc/g1/TestRegionAlignment.java \ gc/g1/TestShrinkToOneRegion.java \ gc/metaspace/G1AddMetaspaceDependency.java \ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/EliminateAutoBox/UnsignedLoads.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @library /testlibrary + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox + * -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort + * UnsignedLoads + */ +import static com.oracle.java.testlibrary.Asserts.assertEQ; + +public class UnsignedLoads { + public static int testUnsignedByte() { + byte[] bytes = new byte[] {-1}; + int res = 0; + for (int i = 0; i < 100000; i++) { + for (Byte b : bytes) { + res = b & 0xff; + } + } + return res; + } + + public static int testUnsignedShort() { + int res = 0; + short[] shorts = new short[] {-1}; + for (int i = 0; i < 100000; i++) { + for (Short s : shorts) { + res = s & 0xffff; + } + } + return res; + } + + public static void main(String[] args) { + assertEQ(testUnsignedByte(), 255); + assertEQ(testUnsignedShort(), 65535); + System.out.println("TEST PASSED"); + } +} --- ./hotspot/test/compiler/ciReplay/common.sh Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/ciReplay/common.sh Wed Feb 04 12:14:39 2015 -0800 @@ -213,7 +213,7 @@ -XX:VMThreadStackSize=512 \ -XX:CompilerThreadStackSize=512 \ -XX:ParallelGCThreads=1 \ - -XX:CICompilerCount=1 \ + -XX:CICompilerCount=2 \ -Xcomp \ -XX:CICrashAt=1 \ -XX:+CreateMinidumpOnCrash \ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.misc.Unsafe;
+import sun.misc.IOUtils;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLConnection;
+
+/*
+ * @test TestAnonymousClassUnloading
+ * @bug 8054402
+ * @summary Tests unloading of anonymous classes.
+ * @library /testlibrary /testlibrary/whitebox
+ * @compile TestAnonymousClassUnloading.java
+ * @run main ClassFileInstaller TestAnonymousClassUnloading
+ *                              sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
+ */
+public class TestAnonymousClassUnloading {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to TestAnonymousClassUnloading).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+
+    /**
+     * Does some work by using the anonymousClass.
+     * @param anonymousClass Class performing some work (will be unloaded)
+     */
+    static private void doWork(Class anonymousClass) throws InstantiationException, IllegalAccessException {
+        // Create a new instance
+        Object anon = anonymousClass.newInstance();
+        // We would like to call a method of anonymousClass here but we cannot cast because the class
+        // was loaded by a different class loader. One solution would be to use reflection but since
+        // we want C2 to implement the call as an IC we call Object::hashCode() here which actually
+        // calls anonymousClass::hashCode(). C2 will then implement this call as an IC.
+        if (anon.hashCode() != 42) {
+            throw new RuntimeException("Work not done");
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    static private void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if (!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Klass* metadata referenced by a compiled IC.
+     *
+     * The following steps are performed:
+     * (1) An anonymous version of TestAnonymousClassUnloading is defined via Unsafe.defineAnonymousClass
+     * (2) The method doWork that calls a method of the anonymous class is compiled. The call
+     *     is implemented as an IC referencing Klass* metadata of the anonymous class.
+     * (3) Unloading of the anonymous class is enforced. The IC now references dead metadata.
+     */
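For reference, here is a minimal stand-alone sketch of step (1), defining an anonymous class from a host class's own bytecode. It assumes JDK 8's sun.misc.Unsafe, obtained reflectively because Unsafe.getUnsafe() rejects application callers; the class name DefineAnonymousSketch is illustrative and not part of the patch:

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class DefineAnonymousSketch {
    public static void main(String[] args) throws Exception {
        // Grab the Unsafe singleton reflectively (JDK 8 only; the class was
        // relocated in later releases).
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        // Read this class's own bytecode from the class path.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (InputStream in = DefineAnonymousSketch.class
                .getResourceAsStream("DefineAnonymousSketch.class")) {
            int b;
            while ((b = in.read()) != -1) {
                buf.write(b);
            }
        }

        // The anonymous class has no name in any class loader, so it can be
        // unloaded as soon as it becomes unreachable.
        Class<?> anon = unsafe.defineAnonymousClass(
                DefineAnonymousSketch.class, buf.toByteArray(), null);
        System.out.println("Defined anonymous class: " + anon);
    }
}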
+    static public void main(String[] args) throws Exception {
+        // (1) Load an anonymous version of this class using the corresponding Unsafe method
+        URL classUrl = TestAnonymousClassUnloading.class.getResource("TestAnonymousClassUnloading.class");
+        URLConnection connection = classUrl.openConnection();
+        byte[] classBytes = IOUtils.readFully(connection.getInputStream(), connection.getContentLength(), true);
+        Class anonymousClass = UNSAFE.defineAnonymousClass(TestAnonymousClassUnloading.class, classBytes, null);
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(anonymousClass);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestAnonymousClassUnloading.class.getDeclaredMethod("doWork", Class.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away reference to anonymousClass to allow unloading
+        anonymousClass = null;
+
+        // Force garbage collection to trigger unloading of anonymousClass
+        // Dead metadata reference to anonymousClass triggers JDK-8054402
+        WHITE_BOX.fullGC();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+
+/*
+ * @test MethodUnloadingTest
+ * @bug 8029443
+ * @summary Tests the unloading of methods due to class unloading
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestMethodUnloading
+ * @build WorkerClass
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
+ */
+public class TestMethodUnloading {
+    private static final String workerClassName = "WorkerClass";
+    private static int work = -1;
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * Does some work by either using the workerClass or locally producing values.
+     * @param workerClass Class performing some work (will be unloaded)
+     * @param useWorker If true the workerClass is used
+     */
+    static private void doWork(Class workerClass, boolean useWorker) throws InstantiationException, IllegalAccessException {
+        if (useWorker) {
+            // Create a new instance
+            Object worker = workerClass.newInstance();
+            // We would like to call a method of WorkerClass here but we cannot cast to WorkerClass
+            // because the class was loaded by a different class loader. One solution would be to use
+            // reflection but since we want C2 to implement the call as an optimized IC we call
+            // Object::hashCode() here which actually calls WorkerClass::hashCode().
+            // C2 will then implement this call as an optimized IC that points to a to-interpreter stub
+            // referencing the Method* for WorkerClass::hashCode().
+            work = worker.hashCode();
+            if (work != 42) {
+                throw new RuntimeException("Work not done");
+            }
+        } else {
+            // Do some important work here
+            work = 1;
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    static private void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if (!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Method* metadata in a to-interpreter stub of an optimized IC.
+     *
+     * The following steps are performed:
+     * (1) A workerClass is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the workerClass is compiled. The call
+     *     is implemented as an optimized IC calling a to-interpreter stub. The to-interpreter
+     *     stub contains a Method* to a workerClass method.
+     * (3) Unloading of the workerClass is enforced. The to-interpreter stub now contains a dead Method*.
+     * (4) Depending on the implementation of the IC, the compiled version of doWork should still be
+     *     valid. We call it again without using the workerClass.
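As an aside, the load/unload pattern of steps (1) and (3) can be sketched without any WhiteBox support. This hypothetical sketch assumes WorkerClass.class is reachable on the class path, and a plain System.gc() is only a request, so unloading is not guaranteed the way WHITE_BOX.fullGC() forces it:

import java.net.URL;
import java.net.URLClassLoader;

public class UnloadSketch {
    public static void main(String[] args) throws Exception {
        // Locate our own code source so the child loader can find WorkerClass.
        URL url = UnloadSketch.class.getProtectionDomain().getCodeSource().getLocation();
        // A parent-less loader: classes it defines are invisible to the system
        // loader and become unloadable once the loader itself is unreachable.
        URLClassLoader loader = new URLClassLoader(new URL[] {url}, null);
        Class<?> worker = Class.forName("WorkerClass", true, loader);
        System.out.println("Loaded " + worker + " via " + worker.getClassLoader());

        // Drop every strong reference that keeps the loader graph alive.
        loader.close();
        loader = null;
        worker = null;

        // Only a request: with class unloading enabled (the default) this may,
        // but need not, unload WorkerClass and its Method* metadata.
        System.gc();
    }
}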
+ */ + static public void main(String[] args) throws Exception { + // (1) Create a custom class loader with no parent class loader + URL url = TestMethodUnloading.class.getProtectionDomain().getCodeSource().getLocation(); + URLClassLoader loader = new URLClassLoader(new URL[] {url}, null); + + // Load worker class with custom class loader + Class workerClass = Class.forName(workerClassName, true, loader); + + // (2) Make sure all paths of doWork are profiled and compiled + for (int i = 0; i < 100000; ++i) { + doWork(workerClass, true); + doWork(workerClass, false); + } + + // Make sure doWork is compiled now + Method doWork = TestMethodUnloading.class.getDeclaredMethod("doWork", Class.class, boolean.class); + makeSureIsCompiled(doWork); + + // (3) Throw away class loader and reference to workerClass to allow unloading + loader.close(); + loader = null; + workerClass = null; + + // Force garbage collection to trigger unloading of workerClass + // Dead reference to WorkerClass::hashCode triggers JDK-8029443 + WHITE_BOX.fullGC(); + + // (4) Depending on the implementation of the IC, the compiled version of doWork + // may still be valid here. Execute it without a workerClass. + doWork(null, false); + if (work != 1) { + throw new RuntimeException("Work not done"); + } + + doWork(Object.class, false); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/classUnloading/methodUnloading/WorkerClass.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * Worker class that is dynamically loaded/unloaded by TestMethodUnloading. + */ +public class WorkerClass { + /** + * We override hashCode here to be able to access this implementation + * via an Object reference (we cannot cast to WorkerClass). + */ + @Override + public int hashCode() { + return 42; + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; + +import com.oracle.java.testlibrary.*; + +/* + * @test + * @bug 8050079 + * @summary Compiles a monomorphic call to finalizeObject() on a modified java.lang.Object to test C1 CHA. + * @library /testlibrary + * @compile -XDignore.symbol.file java/lang/Object.java TestMonomorphicObjectCall.java + * @run main TestMonomorphicObjectCall + */ +public class TestMonomorphicObjectCall { + final static String testClasses = System.getProperty("test.classes") + File.separator; + + private static void callFinalize(Object object) throws Throwable { + // Call modified version of java.lang.Object::finalize() that is + // not overridden by any subclass. C1 CHA should mark the call site + // as monomorphic and inline the method. + object.finalizeObject(); + } + + public static void main(String[] args) throws Throwable { + if (args.length == 0) { + // Execute new instance with modified java.lang.Object + executeTestJvm(); + } else { + // Trigger compilation of 'callFinalize' + callFinalize(new Object()); + } + } + + public static void executeTestJvm() throws Throwable { + // Execute test with modified version of java.lang.Object + // in -Xbootclasspath. + String[] vmOpts = new String[] { + "-Xbootclasspath/p:" + testClasses, + "-Xcomp", + "-XX:-VerifyDependencies", + "-XX:CompileOnly=TestMonomorphicObjectCall::callFinalize", + "-XX:CompileOnly=Object::finalizeObject", + "-XX:TieredStopAtLevel=1", + TestMonomorphicObjectCall.class.getName(), + "true"}; + OutputAnalyzer output = ProcessTools.executeTestJvm(vmOpts); + output.shouldHaveExitValue(0); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/dependencies/MonomorphicObjectCall/java/lang/Object.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 1994, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Slightly modified version of java.lang.Object that replaces + * finalize() by finalizeObject() to avoid overriding in subclasses. + */ +public class Object { + + private static native void registerNatives(); + static { + registerNatives(); + } + + public final native Class getClass(); + + public native int hashCode(); + + public boolean equals(Object obj) { + return (this == obj); + } + + protected native Object clone() throws CloneNotSupportedException; + + public String toString() { + return getClass().getName() + "@" + Integer.toHexString(hashCode()); + } + + public final native void notify(); + + public final native void notifyAll(); + + public final native void wait(long timeout) throws InterruptedException; + + public final void wait(long timeout, int nanos) throws InterruptedException { + if (timeout < 0) { + throw new IllegalArgumentException("timeout value is negative"); + } + + if (nanos < 0 || nanos > 999999) { + throw new IllegalArgumentException( + "nanosecond timeout value out of range"); + } + + if (nanos >= 500000 || (nanos != 0 && timeout == 0)) { + timeout++; + } + + wait(timeout); + } + + public final void wait() throws InterruptedException { + wait(0); + } + + /** + * Replaces original finalize() method and is therefore not + * overridden by any subclasses of Object. + * @throws Throwable + */ + // protected void finalize() throws Throwable { } + public void finalizeObject() throws Throwable { } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/exceptions/CatchInlineExceptions.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8059299 + * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr + * @run main/othervm -Xbatch CatchInlineExceptions + */ + +class Exception1 extends Exception {}; +class Exception2 extends Exception {}; + +public class CatchInlineExceptions { + private static int counter0; + private static int counter1; + private static int counter2; + private static int counter; + + static void foo(int i) throws Exception { + if ((i & 1023) == 2) { + counter0++; + throw new Exception2(); + } + } + + static void test(int i) throws Exception { + try { + foo(i); + } + catch (Exception e) { + if (e instanceof Exception1) { + counter1++; + } else if (e instanceof Exception2) { + counter2++; + } + counter++; + throw e; + } + } + + public static void main(String[] args) throws Throwable { + for (int i = 0; i < 15000; i++) { + try { + test(i); + } catch (Exception e) { + // expected + } + } + if (counter1 != 0) { + throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0"); + } + if (counter2 != counter0) { + throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")"); + } + if (counter2 != counter) { + throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter(" + counter + ")"); + } + System.out.println("TEST PASSED"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/exceptions/SumTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066900 + * @summary FP registers are not properly restored by C1 when handling exceptions + * @run main/othervm -Xbatch SumTest + * + */ +public class SumTest { + private static class Sum { + + double[] sums; + + /** + * Construct empty Sum + */ + public Sum() { + sums = new double[0]; + } + + /** + * Return the sum of all numbers added to this Sum + * + * @return the sum + */ + final public double getSum() { + double sum = 0; + for (final double s : sums) { + sum += s; + } + + return sum; + } + + /** + * Add a new number to this Sum + * + * @param a number to be added. 
+ */ + final public void add(double a) { + try { + sums[sums.length] = -1; // Cause IndexOutOfBoundsException + } catch (final IndexOutOfBoundsException e) { + final double[] oldSums = sums; + sums = new double[oldSums.length + 1]; // Extend sums + System.arraycopy(oldSums, 0, sums, 0, oldSums.length); + sums[oldSums.length] = a; // Append a + } + } + } + + public static void main(String[] args) throws Exception { + final Sum sum = new Sum(); + for (int i = 1; i <= 10000; ++i) { + sum.add(1); + double ii = sum.getSum(); + if (i != ii) { + throw new Exception("Failure: computed = " + ii + ", expected = " + i); + } + } + } + +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/exceptions/TestRecursiveReplacedException.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8054224 + * @summary Recursive method compiled by C1 is unable to catch StackOverflowError + * @run main/othervm -Xcomp -XX:CompileOnly=Test.run -XX:+TieredCompilation -XX:TieredStopAtLevel=2 -Xss256K TestRecursiveReplacedException + * + */ + +public class TestRecursiveReplacedException { + + public static void main(String args[]) { + new TestRecursiveReplacedException().run(); + } + + public void run() { + try { + run(); + } catch (Throwable t) { + } + } +} --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build AddExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class AddExactIntTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build AddExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class AddExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build DecrementExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class DecrementExactIntTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build DecrementExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class DecrementExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build IncrementExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class IncrementExactIntTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build IncrementExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class IncrementExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/IntrinsicBase.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/IntrinsicBase.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,7 @@ */ import com.oracle.java.testlibrary.Platform; +import intrinsics.Verifier; import java.io.FileOutputStream; import java.lang.reflect.Executable; @@ -79,10 +80,10 @@ System.out.println("Expected intrinsic count is " + expectedIntrinsicCount + " name " + getIntrinsicId()); - final FileOutputStream out = new FileOutputStream(getVMOption("LogFile") + ".verify.properties"); + final FileOutputStream out = new FileOutputStream(getVMOption("LogFile") + Verifier.PROPERTY_FILE_SUFFIX); Properties expectedProps = new Properties(); - expectedProps.setProperty("intrinsic.name", getIntrinsicId()); - expectedProps.setProperty("intrinsic.expectedCount", String.valueOf(expectedIntrinsicCount)); + expectedProps.setProperty(Verifier.INTRINSIC_NAME_PROPERTY, getIntrinsicId()); + expectedProps.setProperty(Verifier.INTRINSIC_EXPECTED_COUNT_PROPERTY, String.valueOf(expectedIntrinsicCount)); expectedProps.store(out, null); out.close(); --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build MultiplyExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class MultiplyExactIntTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build MultiplyExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class MultiplyExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build NegateExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class NegateExactIntTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build NegateExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class NegateExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build SubtractExactIntTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactIntTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ /* * @test * @library /testlibrary /testlibrary/whitebox /compiler/whitebox + * /compiler/testlibrary * @build SubtractExactLongTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions @@ -34,7 +35,7 @@ * -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation * -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod * -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactLongTest - * @run main Verifier hs_neg.log hs.log + * @run main intrinsics.Verifier hs_neg.log hs.log */ public class SubtractExactLongTest { --- ./hotspot/test/compiler/intrinsics/mathexact/sanity/Verifier.java Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -import java.io.BufferedReader; -import java.io.FileReader; -import java.util.Properties; - -public class Verifier { - - public static void main(String[] args) throws Exception { - if (args.length == 0) - throw new RuntimeException("Test bug, nothing to verify"); - for (String hsLogFile : args) { - verify(hsLogFile); - } - } - - private static void verify(String hsLogFile) throws Exception { - System.out.println("Verifying " + hsLogFile); - - final Properties expectedProperties = new Properties(); - final FileReader reader = new FileReader(hsLogFile + ".verify.properties"); - expectedProperties.load(reader); - reader.close(); - - int fullMatchCnt = 0; - int suspectCnt = 0; - final String intrinsicId = expectedProperties.getProperty("intrinsic.name"); - final String prefix = " 0 ? 
+        int iters = (args.length > 0 ? Integer.valueOf(args[0]) : 100000);
+        int warmupIters = (args.length > 1 ? Integer.valueOf(args[1]) : 20000);
+
+        testSHA(provider, algorithm, msgSize, offset, iters, warmupIters);
+
+        if (algorithm2.equals("") == false) {
+            testSHA(provider, algorithm2, msgSize, offset, iters, warmupIters);
+        }
+    }
+
+    static void testSHA(String provider, String algorithm, int msgSize,
+            int offset, int iters, int warmupIters) throws Exception {
+        System.out.println("provider = " + provider);
+        System.out.println("algorithm = " + algorithm);
+        System.out.println("msgSize = " + msgSize + " bytes");
+        System.out.println("offset = " + offset);
+        System.out.println("iters = " + iters);
+
+        byte[] expectedHash = new byte[HASH_LEN];
+        byte[] hash = new byte[HASH_LEN];
+        byte[] data = new byte[msgSize + offset];
+        for (int i = 0; i < (msgSize + offset); i++) {
+            data[i] = (byte)(i & 0xff);
+        }
+
+        try {
+            MessageDigest sha = MessageDigest.getInstance(algorithm, provider);
+
+            /* do once, which doesn't use intrinsics */
+            sha.reset();
+            sha.update(data, offset, msgSize);
+            expectedHash = sha.digest();
+
+            /* warm up */
+            for (int i = 0; i < warmupIters; i++) {
+                sha.reset();
+                sha.update(data, offset, msgSize);
+                hash = sha.digest();
+            }
+
+            /* check result */
+            if (Arrays.equals(hash, expectedHash) == false) {
+                System.out.println("TestSHA Error: ");
+                showArray(expectedHash, "expectedHash");
+                showArray(hash, "computedHash");
+                //System.exit(1);
+                throw new Exception("TestSHA Error");
+            } else {
+                showArray(hash, "hash");
+            }
+
+            /* measure performance */
+            long start = System.nanoTime();
+            for (int i = 0; i < iters; i++) {
+                sha.reset();
+                sha.update(data, offset, msgSize);
+                hash = sha.digest();
+            }
+            long end = System.nanoTime();
+            double total = (double)(end - start)/1e9;         /* in seconds */
+            double thruput = (double)msgSize*iters/1e6/total; /* in MB/s */
+            System.out.println("TestSHA runtime = " + total + " seconds");
+            System.out.println("TestSHA throughput = " + thruput + " MB/s");
+            System.out.println();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e);
+            //System.exit(1);
+            throw new Exception(e);
+        }
+    }
+
+    static void showArray(byte b[], String name) {
+        System.out.format("%s [%d]: ", name, b.length);
+        for (int i = 0; i < Math.min(b.length, HASH_LEN); i++) {
+            System.out.format("%02x ", b[i] & 0xff);
+        }
+        System.out.println();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import sha.predicate.IntrinsicPredicates;
+
+import java.util.function.BooleanSupplier;
+
+/**
+ * Base class for all CLI tests on SHA-related options.
+ *
+ * Instead of using huge complex tests for each option, each test is constructed
+ * from several test cases shared among different tests.
+ */
+public class SHAOptionsBase extends CommandLineOptionTest {
+    protected static final String USE_SHA_OPTION = "UseSHA";
+    protected static final String USE_SHA1_INTRINSICS_OPTION
+            = "UseSHA1Intrinsics";
+    protected static final String USE_SHA256_INTRINSICS_OPTION
+            = "UseSHA256Intrinsics";
+    protected static final String USE_SHA512_INTRINSICS_OPTION
+            = "UseSHA512Intrinsics";
+
+    // Note that the strings below will be passed to
+    // CommandLineOptionTest.verifySameJVMStartup and thus are regular
+    // expressions, not just plain strings.
+    protected static final String SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE
+            = "SHA instructions are not available on this CPU";
+    protected static final String SHA1_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA1 instruction is not available on this CPU\\.";
+    protected static final String SHA256_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA256 instruction \\(for SHA-224 and SHA-256\\) "
+            + "is not available on this CPU\\.";
+    protected static final String SHA512_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA512 instruction \\(for SHA-384 and SHA-512\\) "
+            + "is not available on this CPU\\.";
+    protected static final String SHA_INTRINSICS_ARE_NOT_AVAILABLE
+            = "SHA intrinsics are not available on this CPU";
+
+    private final TestCase[] testCases;
+
+    /**
+     * Returns the warning message that should occur in VM output if an option
+     * with the name {@code optionName} was turned on and the CPU does not
+     * support the required instructions.
+     *
+     * @param optionName The name of the option for which the warning message
+     *        should be returned.
+     * @return A warning message that will be printed out to VM output if CPU
+     *         instructions required by the option are not supported.
+     */
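The TestCase machinery that follows gates every case on a CPU-feature predicate. A minimal, self-contained sketch of that pattern is given here; the names GatedTestCase and GatedTestCaseDemo are illustrative only, while the real implementation below builds on CommandLineOptionTest and IntrinsicPredicates:

import java.util.function.BooleanSupplier;

abstract class GatedTestCase {
    private final String name;
    private final BooleanSupplier predicate;

    GatedTestCase(String name, BooleanSupplier predicate) {
        this.name = name;
        this.predicate = predicate;
    }

    final void run() throws Throwable {
        // Skip silently rather than fail when the CPU lacks the feature.
        if (!predicate.getAsBoolean()) {
            System.out.println("Skipping " + name + " due to predicate failure.");
            return;
        }
        System.out.println("Running " + name);
        verify();
    }

    protected abstract void verify() throws Throwable;
}

public class GatedTestCaseDemo {
    public static void main(String[] args) throws Throwable {
        boolean shaAvailable = false; // would come from real CPU feature detection
        new GatedTestCase("UseSHA on an unsupported CPU", () -> !shaAvailable) {
            @Override
            protected void verify() {
                // A real case would check startup warnings and flag values here.
                System.out.println("verified");
            }
        }.run();
    }
}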
+    protected static String getWarningForUnsupportedCPU(String optionName) {
+        if (Platform.isSparc()) {
+            switch (optionName) {
+                case SHAOptionsBase.USE_SHA_OPTION:
+                    return SHAOptionsBase.SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA1_INSTRUCTION_IS_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA256_INSTRUCTION_IS_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA512_INSTRUCTION_IS_NOT_AVAILABLE;
+                default:
+                    throw new Error("Unexpected option " + optionName);
+            }
+        } else if (Platform.isX64() || Platform.isX86()) {
+            switch (optionName) {
+                case SHAOptionsBase.USE_SHA_OPTION:
+                    return SHAOptionsBase.SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA_INTRINSICS_ARE_NOT_AVAILABLE;
+                default:
+                    throw new Error("Unexpected option " + optionName);
+            }
+        } else {
+            throw new Error("Support for CPUs other than X86 or SPARC is not "
+                    + "implemented.");
+        }
+    }
+
+    /**
+     * Returns the predicate indicating whether or not CPU instructions required
+     * by the option with name {@code optionName} are available.
+     *
+     * @param optionName The name of the option for which a predicate should be
+     *        returned.
+     * @return The predicate on availability of CPU instructions required by the
+     *         option.
+     */
+    protected static BooleanSupplier getPredicateForOption(String optionName) {
+        switch (optionName) {
+            case SHAOptionsBase.USE_SHA_OPTION:
+                return IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE;
+            default:
+                throw new Error("Unexpected option " + optionName);
+        }
+    }
+
+    public SHAOptionsBase(TestCase... testCases) {
+        super(Boolean.TRUE::booleanValue);
+        this.testCases = testCases;
+    }
+
+    @Override
+    protected void runTestCases() throws Throwable {
+        for (TestCase testCase : testCases) {
+            testCase.test();
+        }
+    }
+
+    public static abstract class TestCase {
+        protected final String optionName;
+        private final BooleanSupplier predicate;
+
+        protected TestCase(String optionName, BooleanSupplier predicate) {
+            this.optionName = optionName;
+            this.predicate = predicate;
+        }
+
+        protected final void test() throws Throwable {
+            String testCaseName = this.getClass().getName();
+            if (!predicate.getAsBoolean()) {
+                System.out.println("Skipping " + testCaseName
+                        + " due to predicate failure.");
+                return;
+            } else {
+                System.out.println("Running " + testCaseName);
+            }
+
+            verifyWarnings();
+            verifyOptionValues();
+        }
+
+        protected void verifyWarnings() throws Throwable {
+        }
+
+        protected void verifyOptionValues() throws Throwable {
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnSupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA1Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA1IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHA1IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA1IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnUnsupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA1Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA1IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA1IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA1IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnSupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA256Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA256IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA256IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA256IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnUnsupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
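A note on the pattern just seen: every unsupported-CPU test registers all of the platform-specific cases, and each case's constructor predicate decides whether it applies on the current machine. The standalone demo below (hypothetical names, illustrative only) reduces that skip-or-run dispatch to plain Java.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.function.BooleanSupplier;

    // Standalone demo of the dispatch style used by these tests: every case
    // is registered, and each predicate decides whether its checks run here.
    public class PredicateDispatchDemo {
        public static void main(String[] args) {
            final boolean onSparc = false;  // stand-in for Platform.isSparc()
            final boolean onX86 = true;     // stand-in for isX86()/isX64()

            Map<String, BooleanSupplier> cases = new LinkedHashMap<>();
            cases.put("UnsupportedSparcCase", () -> onSparc);
            cases.put("UnsupportedX86Case", () -> onX86);
            cases.put("OtherCpuCase", () -> !onSparc && !onX86);

            for (Map.Entry<String, BooleanSupplier> entry : cases.entrySet()) {
                if (entry.getValue().getAsBoolean()) {
                    System.out.println("Running " + entry.getKey());
                } else {
                    System.out.println("Skipping " + entry.getKey()
                            + " due to predicate failure.");
                }
            }
        }
    }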
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA256Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA256IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA256IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA256IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnSupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA512Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA512IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA512IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA512IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnUnsupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA512Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA512IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA512IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA512IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHAOptionOnSupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
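The UseSHA tests that follow exercise an interaction between the umbrella UseSHA flag and the per-algorithm UseSHA*Intrinsics flags. The model below is distilled from the assertions in the test cases later in this patch, not from HotSpot source, so treat it as an assumption rather than the VM's actual ergonomics code.

    // Hypothetical model of the flag ergonomics the CLI tests assert.
    public class ShaFlagModel {
        static boolean effectiveIntrinsic(boolean useSHA, boolean intrinsicFlag,
                boolean cpuSupported) {
            // An intrinsic is effectively on only if the umbrella flag is on,
            // the per-algorithm flag is on, and the CPU supports it.
            return useSHA && intrinsicFlag && cpuSupported;
        }

        static boolean effectiveUseSHA(boolean useSHA, boolean sha1,
                boolean sha256, boolean sha512) {
            // UseSHA is switched off when every per-algorithm flag is off.
            return useSHA && (sha1 || sha256 || sha512);
        }

        public static void main(String[] args) {
            // All intrinsics off forces the umbrella flag off, as the
            // UseSHA-specific test cases below expect.
            System.out.println(effectiveUseSHA(true, false, false, false));
        }
    }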
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHAOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHAOptionOnSupportedCPU
+ */
+public class TestUseSHAOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForSupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new UseSHASpecificTestCaseForSupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/TestUseSHAOptionOnUnsupportedCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHAOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHAOptionOnUnsupportedCPU
+ */
+public class TestUseSHAOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new UseSHASpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+import com.oracle.java.testlibrary.cli.predicate.OrPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to non-x86 and
+ * non-SPARC CPUs.
+ */
+public class GenericTestCaseForOtherCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForOtherCPU(String optionName) {
+        // Execute the test case on any CPU except SPARC and X86.
+        super(optionName, new NotPredicate(new OrPredicate(Platform::isSparc,
+                new OrPredicate(Platform::isX64, Platform::isX86))));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that on non-x86 and non-SPARC CPUs the use of SHA-related
+        // options does not cause any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null,
+                new String[] { ".*" + optionName + ".*" }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        CommandLineOptionTest.verifySameJVMStartup(null,
+                new String[] { ".*" + optionName + ".*" }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that the option is disabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false");
+
+        // Verify that the option stays disabled even if it was explicitly
+        // enabled using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the option is disabled when it is explicitly disabled
+        // using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForSupportedSparcCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
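GenericTestCaseForOtherCPU above builds its platform check out of the testlibrary's predicate classes. Those combinators appear to be thin wrappers over java.util.function.BooleanSupplier; the following minimal re-implementation is for illustration only and is not the testlibrary source.

    import java.util.function.BooleanSupplier;

    // Illustrative re-implementations of NotPredicate/OrPredicate semantics.
    class Not implements BooleanSupplier {
        private final BooleanSupplier p;
        Not(BooleanSupplier p) { this.p = p; }
        @Override public boolean getAsBoolean() { return !p.getAsBoolean(); }
    }

    class Or implements BooleanSupplier {
        private final BooleanSupplier a, b;
        Or(BooleanSupplier a, BooleanSupplier b) { this.a = a; this.b = b; }
        @Override public boolean getAsBoolean() {
            return a.getAsBoolean() || b.getAsBoolean();
        }
    }

    public class PredicateComboDemo {
        public static void main(String[] args) {
            BooleanSupplier isSparc = () -> false;  // stand-ins for Platform
            BooleanSupplier isX64 = () -> false;
            BooleanSupplier isX86 = () -> false;
            // Mirrors GenericTestCaseForOtherCPU: neither SPARC nor x86.
            BooleanSupplier otherCpu = new Not(new Or(isSparc,
                    new Or(isX64, isX86)));
            System.out.println("Run on this CPU: " + otherCpu.getAsBoolean());
        }
    }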
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to SPARC CPUs which
+ * support instructions required by the tested option.
+ */
+public class GenericTestCaseForSupportedSparcCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForSupportedSparcCPU(String optionName) {
+        super(optionName, new AndPredicate(Platform::isSparc,
+                SHAOptionsBase.getPredicateForOption(optionName)));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that there are no warnings when the option is enabled.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the option can be disabled even if +UseSHA was passed
+        // to the JVM.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+
+        // Verify that it is possible to enable the tested option and disable
+        // all SHA intrinsics via -UseSHA without any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that on a supported CPU the option is enabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true");
+
+        // Verify that it is possible to explicitly enable the option.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that it is possible to explicitly disable the option.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+
+        // Verify that the option is disabled when -UseSHA was passed to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, false));
+
+        // Verify that it is possible to explicitly disable the tested option
+        // even if +UseSHA was passed to the JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedSparcCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to SPARC CPUs which don't
+ * support the instructions required by the tested option.
+ */
+public class GenericTestCaseForUnsupportedSparcCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForUnsupportedSparcCPU(String optionName) {
+        super(optionName, new AndPredicate(Platform::isSparc,
+                new NotPredicate(SHAOptionsBase.getPredicateForOption(
+                        optionName))));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that the option can be disabled without any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that the option is disabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false");
+
+        // Verify that the option is disabled even if it was explicitly enabled
+        // using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the option is disabled when +UseSHA was passed to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
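All of these cases drive the JVM through CommandLineOptionTest.prepareBooleanFlag(name, value). Assuming it renders HotSpot's usual boolean-flag syntax (an assumption; the helper's source is not part of this excerpt), it behaves like the sketch below.

    // Assumed behavior of the testlibrary helper, for illustration only:
    // boolean HotSpot flags are spelled -XX:+Name (enable) / -XX:-Name (disable).
    public class BooleanFlagSketch {
        static String prepareBooleanFlag(String name, boolean value) {
            return String.format("-XX:%c%s", value ? '+' : '-', name);
        }

        public static void main(String[] args) {
            System.out.println(prepareBooleanFlag("UseSHA", true));  // -XX:+UseSHA
            System.out.println(prepareBooleanFlag("UseSHA", false)); // -XX:-UseSHA
        }
    }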
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import com.oracle.java.testlibrary.ExitCode; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.cli.CommandLineOptionTest; +import com.oracle.java.testlibrary.cli.predicate.OrPredicate; + +/** + * Generic test case for SHA-related options targeted to X86 CPUs that don't + * support SHA-related instructions. + */ +public class GenericTestCaseForUnsupportedX86CPU + extends SHAOptionsBase.TestCase { + public GenericTestCaseForUnsupportedX86CPU(String optionName) { + super(optionName, new OrPredicate(Platform::isX64, Platform::isX86)); + } + + @Override + protected void verifyWarnings() throws Throwable { + // Verify that when the tested option is explicitly enabled, then + // a warning will occur in VM output. + CommandLineOptionTest.verifySameJVMStartup(new String[] { + SHAOptionsBase.getWarningForUnsupportedCPU(optionName) + }, null, ExitCode.OK, + CommandLineOptionTest.prepareBooleanFlag(optionName, true)); + + // Verify that the tested option could be explicitly disabled without + // a warning. + CommandLineOptionTest.verifySameJVMStartup(null, new String[] { + SHAOptionsBase.getWarningForUnsupportedCPU(optionName) + }, ExitCode.OK, + CommandLineOptionTest.prepareBooleanFlag(optionName, false)); + } + + @Override + protected void verifyOptionValues() throws Throwable { + // Verify that the tested option is disabled by default. + CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false"); + + // Verify that it is not possible to explicitly enable the option. + CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false", + CommandLineOptionTest.prepareBooleanFlag(optionName, true)); + + // Verify that the tested option is disabled even if +UseSHA was passed + // to JVM. + CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA_OPTION, true)); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * Test case specific to UseSHA*Intrinsics options targeted to SPARC CPUs which
+ * don't support the instruction required by the tested option, but do support
+ * other SHA-related instructions.
+ *
+ * For example, a CPU may support the sha1 instruction, but not the sha256 or
+ * sha512 instructions.
+ */
+public class UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU
+        extends SHAOptionsBase.TestCase {
+    public UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+            String optionName) {
+        // Execute the test case on a SPARC CPU that supports some sha*
+        // instructions, but not the one required by the tested option.
+        super(optionName, new AndPredicate(Platform::isSparc,
+                new AndPredicate(
+                        IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE,
+                        new NotPredicate(SHAOptionsBase.getPredicateForOption(
+                                optionName)))));
+    }
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that an attempt to enable the tested option causes a warning.
+        CommandLineOptionTest.verifySameJVMStartup(new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, null, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForSupportedSparcCPU.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +import com.oracle.java.testlibrary.Asserts; +import com.oracle.java.testlibrary.ExitCode; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.cli.CommandLineOptionTest; +import com.oracle.java.testlibrary.cli.predicate.AndPredicate; +import sha.predicate.IntrinsicPredicates; + +/** + * UseSHA specific test case targeted to SPARC CPUs which support any sha* + * instruction. + */ +public class UseSHASpecificTestCaseForSupportedSparcCPU + extends SHAOptionsBase.TestCase { + public UseSHASpecificTestCaseForSupportedSparcCPU(String optionName) { + super(SHAOptionsBase.USE_SHA_OPTION, new AndPredicate(Platform::isSparc, + IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE)); + + Asserts.assertEQ(optionName, SHAOptionsBase.USE_SHA_OPTION, + "Test case should be used for " + SHAOptionsBase.USE_SHA_OPTION + + " option only."); + } + + @Override + protected void verifyWarnings() throws Throwable { + // Verify that there will be no warnings when +UseSHA was passed and + // all UseSHA*Intrinsics options were disabled. + CommandLineOptionTest.verifySameJVMStartup( + null, new String[] { ".*UseSHA.*" }, ExitCode.OK, + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false)); + } + + @Override + protected void verifyOptionValues() throws Throwable { + // Verify that UseSHA is disabled when all UseSHA*Intrinscs are + // disabled. + CommandLineOptionTest.verifyOptionValueForSameVM( + SHAOptionsBase.USE_SHA_OPTION, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false)); + + CommandLineOptionTest.verifyOptionValueForSameVM( + // Verify that UseSHA is disabled when all UseSHA*Intrinscs are + // disabled even if it was explicitly enabled. + SHAOptionsBase.USE_SHA_OPTION, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false)); + + // Verify that explicitly disabled UseSHA option remains disabled even + // if all UseSHA*Intrinsics options were enabled. + CommandLineOptionTest.verifyOptionValueForSameVM( + SHAOptionsBase.USE_SHA_OPTION, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA_OPTION, false), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true)); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForUnsupportedSparcCPU.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import com.oracle.java.testlibrary.Asserts; +import com.oracle.java.testlibrary.ExitCode; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.cli.CommandLineOptionTest; +import com.oracle.java.testlibrary.cli.predicate.AndPredicate; +import com.oracle.java.testlibrary.cli.predicate.NotPredicate; +import sha.predicate.IntrinsicPredicates; + +/** + * UseSHA specific test case targeted to SPARC CPUs which don't support all sha* + * instructions. + */ +public class UseSHASpecificTestCaseForUnsupportedSparcCPU + extends SHAOptionsBase.TestCase { + public UseSHASpecificTestCaseForUnsupportedSparcCPU(String optionName) { + super(SHAOptionsBase.USE_SHA_OPTION, new AndPredicate(Platform::isSparc, + new NotPredicate( + IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE))); + + Asserts.assertEQ(optionName, SHAOptionsBase.USE_SHA_OPTION, + "Test case should be used for " + SHAOptionsBase.USE_SHA_OPTION + + " option only."); + } + + @Override + protected void verifyWarnings() throws Throwable { + // Verify that attempt to use UseSHA option will cause a warning. + CommandLineOptionTest.verifySameJVMStartup(new String[] { + SHAOptionsBase.getWarningForUnsupportedCPU(optionName) + }, null, ExitCode.OK, + CommandLineOptionTest.prepareBooleanFlag(optionName, true)); + } + + @Override + protected void verifyOptionValues() throws Throwable { + // Verify that UseSHA option remains disabled even if all + // UseSHA*Intrincs options were enabled. + CommandLineOptionTest.verifyOptionValueForSameVM( + SHAOptionsBase.USE_SHA_OPTION, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true)); + + // Verify that UseSHA option remains disabled even if all + // UseSHA*Intrincs options were enabled and UseSHA was enabled as well. 
+ CommandLineOptionTest.verifyOptionValueForSameVM( + SHAOptionsBase.USE_SHA_OPTION, "false", + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true), + CommandLineOptionTest.prepareBooleanFlag( + SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true)); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/SHASanityTestBase.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import intrinsics.Verifier; +import sun.hotspot.WhiteBox; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.Objects; +import java.util.Properties; +import java.util.function.BooleanSupplier; + +/** + * Base class for sanity tests on SHA intrinsics support. + */ +public class SHASanityTestBase { + protected static final String SHA1_INTRINSIC_ID + = "_sha_implCompress"; + protected static final String SHA256_INTRINSIC_ID + = "_sha2_implCompress"; + protected static final String SHA512_INTRINSIC_ID + = "_sha5_implCompress"; + protected static final String MB_INTRINSIC_ID + = "_digestBase_implCompressMB"; + + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + private static final int MSG_SIZE = 1024; + private static final int OFFSET = 0; + private static final int ITERATIONS = 10000; + private static final int WARMUP_ITERATIONS = 1; + private static final String PROVIDER = "SUN"; + + private final BooleanSupplier predicate; + private final String intrinsicID; + + /** + * Construct the new test on intrinsic with ID {@code intrinsicID}, + * which is expected to be emitted if {@code predicate} is evaluated to + * {@code true}. + * + * @param predicate The predicate indicating if the intrinsic is expected to + * be used. + * @param intrinsicID The ID of the intrinsic to be tested. + */ + protected SHASanityTestBase(BooleanSupplier predicate, String intrinsicID) { + this.predicate = predicate; + this.intrinsicID = intrinsicID; + } + + /** + * Run the test and dump properties to file. + * + * @throws Exception when something went wrong. 
+ */
+    public final void test() throws Exception {
+        String algorithm = Objects.requireNonNull(
+                System.getProperty("algorithm"),
+                "Algorithm name should be specified.");
+
+        dumpProperties();
+
+        TestSHA.testSHA(SHASanityTestBase.PROVIDER, algorithm,
+                SHASanityTestBase.MSG_SIZE, SHASanityTestBase.OFFSET,
+                SHASanityTestBase.ITERATIONS,
+                SHASanityTestBase.WARMUP_ITERATIONS);
+    }
+
+    /**
+     * Dump properties containing information about the tested intrinsic name
+     * and whether or not it should be used to the file
+     * <LogFile value>.verify.properties.
+     *
+     * @throws IOException if dumping the properties to the file fails.
+     */
+    private void dumpProperties() throws IOException {
+        Properties properties = new Properties();
+        properties.setProperty(Verifier.INTRINSIC_NAME_PROPERTY, intrinsicID);
+        properties.setProperty(Verifier.INTRINSIC_IS_EXPECTED_PROPERTY,
+                String.valueOf(predicate.getAsBoolean()));
+
+        String logFileName
+                = SHASanityTestBase.WHITE_BOX.getStringVMFlag("LogFile");
+        FileOutputStream fileOutputStream = new FileOutputStream(logFileName
+                + Verifier.PROPERTY_FILE_SUFFIX);
+
+        properties.store(fileOutputStream, null);
+        fileOutputStream.close();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA1Intrinsics.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-1 intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA1Intrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA1Intrinsics
+ *                   -Dalgorithm=SHA-1 TestSHA1Intrinsics
+ * @run main/othervm -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:-UseSHA1Intrinsics + * -Dalgorithm=SHA-1 TestSHA1Intrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive.log negative.log + */ +import sha.predicate.IntrinsicPredicates; + +public class TestSHA1Intrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE, + SHASanityTestBase.SHA1_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA1MultiBlockIntrinsics.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sha.predicate.IntrinsicPredicates; + +/** + * @test + * @bug 8035968 + * @summary Verify that SHA-1 multi block intrinsic is actually used. + * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../ + * @build TestSHA intrinsics.Verifier TestSHA1MultiBlockIntrinsics + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA1Intrinsics -XX:-UseSHA256Intrinsics + * -XX:-UseSHA512Intrinsics + * -Dalgorithm=SHA-1 TestSHA1MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_def.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA1Intrinsics -Dalgorithm=SHA-1 + * TestSHA1MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA + * -Dalgorithm=SHA-1 TestSHA1MultiBlockIntrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive.log positive_def.log + * negative.log + */ +public class TestSHA1MultiBlockIntrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE, + SHASanityTestBase.MB_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA256Intrinsics.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sha.predicate.IntrinsicPredicates; + +/** + * @test + * @bug 8035968 + * @summary Verify that SHA-256 intrinsic is actually used. + * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../ + * @build TestSHA intrinsics.Verifier TestSHA256Intrinsics + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_224.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics + * -Dalgorithm=SHA-224 TestSHA256Intrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_224.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:-UseSHA256Intrinsics + * -Dalgorithm=SHA-224 TestSHA256Intrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_256.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics + * -Dalgorithm=SHA-256 TestSHA256Intrinsics + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_256.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:-UseSHA256Intrinsics + * -Dalgorithm=SHA-256 TestSHA256Intrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive_224.log positive_256.log + * negative_224.log negative_256.log + */ +public class TestSHA256Intrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA256_INTRINSICS_AVAILABLE, + SHASanityTestBase.SHA256_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA256MultiBlockIntrinsics.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sha.predicate.IntrinsicPredicates; + +/** + * @test + * @bug 8035968 + * @summary Verify that SHA-256 multi block intrinsic is actually used. + * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../ + * @build TestSHA intrinsics.Verifier TestSHA256MultiBlockIntrinsics + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_224.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics -XX:-UseSHA1Intrinsics + * -XX:-UseSHA512Intrinsics + * -Dalgorithm=SHA-224 TestSHA256MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_224_def.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics -Dalgorithm=SHA-224 + * TestSHA256MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_224.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA + * -Dalgorithm=SHA-224 TestSHA256MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_256.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics -XX:-UseSHA1Intrinsics + * -XX:-UseSHA512Intrinsics + * -Dalgorithm=SHA-256 TestSHA256MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_256_def.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA256Intrinsics -Dalgorithm=SHA-256 + * TestSHA256MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_256.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA + * -Dalgorithm=SHA-256 TestSHA256MultiBlockIntrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive_224.log positive_256.log + * positive_224_def.log positive_256_def.log negative_224.log + * negative_256.log + */ +public class TestSHA256MultiBlockIntrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA256_INTRINSICS_AVAILABLE, + SHASanityTestBase.MB_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA512Intrinsics.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sha.predicate.IntrinsicPredicates; + +/** + * @test + * @bug 8035968 + * @summary Verify that SHA-512 intrinsic is actually used. 
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../ + * @build TestSHA intrinsics.Verifier TestSHA512Intrinsics + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_384.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics + * -Dalgorithm=SHA-384 TestSHA512Intrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_384.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:-UseSHA512Intrinsics + * -Dalgorithm=SHA-384 TestSHA512Intrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_512.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics + * -Dalgorithm=SHA-512 TestSHA512Intrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_512.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:-UseSHA512Intrinsics + * -Dalgorithm=SHA-512 TestSHA512Intrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive_384.log positive_512.log + * negative_384.log negative_512.log + */ +public class TestSHA512Intrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA512_INTRINSICS_AVAILABLE, + SHASanityTestBase.SHA512_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/intrinsics/sha/sanity/TestSHA512MultiBlockIntrinsics.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import sha.predicate.IntrinsicPredicates; + +/** + * @test + * @bug 8035968 + * @summary Verify that SHA-256 multi block intrinsic is actually used. + * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../ + * @build TestSHA intrinsics.Verifier TestSHA512MultiBlockIntrinsics + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_384.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics -XX:-UseSHA1Intrinsics + * -XX:-UseSHA256Intrinsics + * -Dalgorithm=SHA-384 TestSHA512MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_384_def.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics -Dalgorithm=SHA-384 + * TestSHA512MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_384.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA + * -Dalgorithm=SHA-384 TestSHA512MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_512.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics -XX:-UseSHA1Intrinsics + * -XX:-UseSHA256Intrinsics + * -Dalgorithm=SHA-512 TestSHA512MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=positive_512_def.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA + * -XX:+UseSHA512Intrinsics -Dalgorithm=SHA-512 + * TestSHA512MultiBlockIntrinsics + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500 + * -XX:Tier4InvocationThreshold=500 + * -XX:+LogCompilation -XX:LogFile=negative_512.log + * -XX:CompileOnly=sun/security/provider/DigestBase + * -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA + * -Dalgorithm=SHA-512 TestSHA512MultiBlockIntrinsics + * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE + * intrinsics.Verifier positive_384.log positive_512.log + * positive_384_def.log positive_512_def.log negative_384.log + * negative_512.log + */ +public class TestSHA512MultiBlockIntrinsics { + public static void main(String args[]) throws Exception { + new SHASanityTestBase(IntrinsicPredicates.SHA512_INTRINSICS_AVAILABLE, + SHASanityTestBase.MB_INTRINSIC_ID).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/jsr292/NullConstantReceiver.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8059556 + * @run main/othervm -Xbatch NullConstantReceiver + */ + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; + +public class NullConstantReceiver { + static final MethodHandle target; + static { + try { + target = MethodHandles.lookup().findVirtual(NullConstantReceiver.class, "test", MethodType.methodType(void.class)); + } catch (ReflectiveOperationException e) { + throw new Error(e); + } + } + + public void test() {} + + static void run() throws Throwable { + target.invokeExact((NullConstantReceiver) null); + } + + public static void main(String[] args) throws Throwable { + for (int i = 0; i<15000; i++) { + try { + run(); + } catch (NullPointerException e) { + // expected + continue; + } + throw new AssertionError("NPE wasn't thrown"); + } + System.out.println("TEST PASSED"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/jsr292/RedefineMethodUsedByMultipleMethodHandles.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8042235 + * @summary redefining method used by multiple MethodHandles crashes VM + * @compile -XDignore.symbol.file RedefineMethodUsedByMultipleMethodHandles.java + * @run main RedefineMethodUsedByMultipleMethodHandles + */ + +import java.io.*; +import java.lang.instrument.*; +import java.lang.invoke.*; +import java.lang.invoke.MethodHandles.Lookup; +import java.lang.management.*; +import java.lang.reflect.*; +import java.nio.file.*; +import java.security.*; +import java.util.jar.*; + +import javax.tools.*; + +import jdk.internal.org.objectweb.asm.*; + +public class RedefineMethodUsedByMultipleMethodHandles { + + static class Foo { + public static Object getName() { + return "foo"; + } + } + + public static void main(String[] args) throws Throwable { + + Lookup lookup = MethodHandles.lookup(); + Method fooMethod = Foo.class.getDeclaredMethod("getName"); + + // fooMH2 displaces fooMH1 from the MemberNamesTable + MethodHandle fooMH1 = lookup.unreflect(fooMethod); + MethodHandle fooMH2 = lookup.unreflect(fooMethod); + + System.out.println("fooMH1.invoke = " + fooMH1.invokeExact()); + System.out.println("fooMH2.invoke = " + fooMH2.invokeExact()); + + // Redefining Foo.getName() causes vmtarget to be updated + // in fooMH2 but not fooMH1 + redefineFoo(); + + // Full GC causes fooMH1.vmtarget to be deallocated + System.gc(); + + // Calling fooMH1.vmtarget crashes the VM + System.out.println("fooMH1.invoke = " + fooMH1.invokeExact()); + } + + /** + * Adds the class file bytes for {@code c} to {@code jar}. + */ + static void add(JarOutputStream jar, Class c) throws IOException { + String classAsPath = c.getName().replace('.', '/') + ".class"; + jar.putNextEntry(new JarEntry(classAsPath)); + InputStream stream = c.getClassLoader().getResourceAsStream(classAsPath); + + int b; + while ((b = stream.read()) != -1) { + jar.write(b); + } + } + + static void redefineFoo() throws Exception { + Manifest manifest = new Manifest(); + manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0"); + Attributes mainAttrs = manifest.getMainAttributes(); + mainAttrs.putValue("Agent-Class", FooAgent.class.getName()); + mainAttrs.putValue("Can-Redefine-Classes", "true"); + mainAttrs.putValue("Can-Retransform-Classes", "true"); + + Path jar = Files.createTempFile("myagent", ".jar"); + try { + JarOutputStream jarStream = new JarOutputStream(new FileOutputStream(jar.toFile()), manifest); + add(jarStream, FooAgent.class); + add(jarStream, FooTransformer.class); + jarStream.close(); + runAgent(jar); + } finally { + Files.deleteIfExists(jar); + } + } + + public static void runAgent(Path agent) throws Exception { + String vmName = ManagementFactory.getRuntimeMXBean().getName(); + int p = vmName.indexOf('@'); + assert p != -1 : "VM name not in <pid>@<host> format: " + vmName; + String pid = vmName.substring(0, p); + ClassLoader cl = ToolProvider.getSystemToolClassLoader(); + Class c = Class.forName("com.sun.tools.attach.VirtualMachine", true, cl); + Method attach = c.getDeclaredMethod("attach", String.class); + Method loadAgent = c.getDeclaredMethod("loadAgent", String.class); + Method detach = c.getDeclaredMethod("detach"); + Object vm = attach.invoke(null, pid); + loadAgent.invoke(vm, agent.toString()); + detach.invoke(vm); + } + + public static class FooAgent { + + public static void agentmain(@SuppressWarnings("unused") String args, Instrumentation inst) throws Exception { + assert inst.isRedefineClassesSupported(); + assert inst.isRetransformClassesSupported(); + inst.addTransformer(new 
FooTransformer(), true); + Class[] classes = inst.getAllLoadedClasses(); + for (int i = 0; i < classes.length; i++) { + Class c = classes[i]; + if (c == Foo.class) { + inst.retransformClasses(new Class[]{c}); + } + } + } + } + + static class FooTransformer implements ClassFileTransformer { + + @Override + public byte[] transform(ClassLoader cl, String className, Class classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws IllegalClassFormatException { + if (Foo.class.equals(classBeingRedefined)) { + System.out.println("redefining " + classBeingRedefined); + ClassReader cr = new ClassReader(classfileBuffer); + ClassWriter cw = new ClassWriter(cr, ClassWriter.COMPUTE_FRAMES); + ClassVisitor adapter = new ClassVisitor(Opcodes.ASM5, cw) { + @Override + public MethodVisitor visitMethod(int access, String base, String desc, String signature, String[] exceptions) { + MethodVisitor mv = cv.visitMethod(access, base, desc, signature, exceptions); + if (mv != null) { + mv = new MethodVisitor(Opcodes.ASM5, mv) { + @Override + public void visitLdcInsn(Object cst) { + System.out.println("replacing \"" + cst + "\" with \"bar\""); + mv.visitLdcInsn("bar"); + } + }; + } + return mv; + } + }; + + cr.accept(adapter, ClassReader.SKIP_FRAMES); + cw.visitEnd(); + return cw.toByteArray(); + } + return classfileBuffer; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/jsr292/VMAnonymousClasses.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8058828 + * @run main/bootclasspath -Xbatch VMAnonymousClasses + */ + +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import jdk.internal.org.objectweb.asm.Opcodes; +import sun.misc.Unsafe; + +import java.lang.invoke.ConstantCallSite; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.MutableCallSite; +import java.lang.invoke.VolatileCallSite; + +public class VMAnonymousClasses { + static final String TEST_METHOD_NAME = "constant"; + + static final Unsafe UNSAFE = Unsafe.getUnsafe(); + + static int getConstantPoolSize(byte[] classFile) { + // The first few bytes: + // u4 magic; + // u2 minor_version; + // u2 major_version; + // u2 constant_pool_count; + return ((classFile[8] & 0xFF) << 8) | (classFile[9] & 0xFF); + } + + static void test(Object value) throws ReflectiveOperationException { + System.out.printf("Test: %s", value != null ? value.getClass() : "null"); + + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS | ClassWriter.COMPUTE_FRAMES); + cw.visit(Opcodes.V1_8, Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER, "Test", null, "java/lang/Object", null); + + MethodVisitor mv = cw.visitMethod(Opcodes.ACC_STATIC | Opcodes.ACC_PUBLIC, TEST_METHOD_NAME, "()Ljava/lang/Object;", null, null); + + String placeholder = "CONSTANT"; + int index = cw.newConst(placeholder); + mv.visitLdcInsn(placeholder); + mv.visitInsn(Opcodes.ARETURN); + + mv.visitMaxs(0, 0); + mv.visitEnd(); + + byte[] classFile = cw.toByteArray(); + + Object[] cpPatches = new Object[getConstantPoolSize(classFile)]; + cpPatches[index] = value; + + Class test = UNSAFE.defineAnonymousClass(VMAnonymousClasses.class, classFile, cpPatches); + + Object expectedResult = (value != null) ? value : placeholder; + for (int i = 0; i<15000; i++) { + Object result = test.getMethod(TEST_METHOD_NAME).invoke(null); + if (result != expectedResult) { + throw new AssertionError(String.format("Wrong value returned: %s != %s", value, result)); + } + } + System.out.println(" PASSED"); + } + + public static void main(String[] args) throws ReflectiveOperationException { + // Objects + test(new Object()); + test("TEST"); + test(new VMAnonymousClasses()); + test(null); + + // Class + test(String.class); + + // Arrays + test(new boolean[0]); + test(new byte[0]); + test(new char[0]); + test(new short[0]); + test(new int[0]); + test(new long[0]); + test(new float[0]); + test(new double[0]); + test(new Object[0]); + + // Multi-dimensional arrays + test(new byte[0][0]); + test(new Object[0][0]); + + // MethodHandle-related + MethodType mt = MethodType.methodType(void.class, String[].class); + MethodHandle mh = MethodHandles.lookup().findStatic(VMAnonymousClasses.class, "main", mt); + test(mt); + test(mh); + test(new ConstantCallSite(mh)); + test(new MutableCallSite(MethodType.methodType(void.class))); + test(new VolatileCallSite(MethodType.methodType(void.class))); + + System.out.println("TEST PASSED"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/loopopts/TestDeadBackbranchArrayAccess.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 8054478 + * @summary dead backbranch in main loop results in erroneous array access + * @run main/othervm -XX:CompileOnly=TestDeadBackbranchArrayAccess -Xcomp TestDeadBackbranchArrayAccess + * + */ + +public class TestDeadBackbranchArrayAccess { + static char[] pattern0 = {0}; + static char[] pattern1 = {1}; + + static void test(char[] array) { + if (pattern1 == null) return; + + int i = 0; + int pos = 0; + char c = array[pos]; + + while (i >= 0 && (c == pattern0[i] || c == pattern1[i])) { + i--; + pos--; + if (pos != -1) { + c = array[pos]; + } + } + } + + public static void main(String[] args) { + for (int i = 0; i < 1000000; i++) { + test(new char[1]); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/macronodes/TestEliminateAllocationPhi.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8046698 + * @summary PhiNode inserted between AllocateNode and Initialization node confuses allocation elimination + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminateAllocationPhi + * + */ + +public class TestEliminateAllocationPhi { + + // This will return I when called from m() and once optimized will + // go away, but this will confuse escape analysis in m(): it will + // find I as non escaping but non scalar replaceable. In its own + // method so that we can make the profile of the if() branch look + // like it's taken sometimes. 
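+ // For instance: m2(I, 1) steps i through (1+2)*(1+2) = 9 and then + // (9+2)*(9+2) = 121, so the i == 121 branch returns II, while + // m2(I, 0) steps through 4 and 36 and returns I. The m2(null, 1) + // warmup calls in main() are what make the profile of that branch + // record the II path as occasionally taken.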
+ static Integer m2(Integer I, int i) { + for (; i < 10; i=(i+2)*(i+2)) { + } + if (i == 121) { + return II; + } + return I; + } + + static Integer II = new Integer(42); + + static int m(int[] integers, boolean flag) { + int j = 0; + while(true) { + try { + int k = integers[j++]; + // A branch that will cause loop unswitching + if (flag) { + k += 42; + } + if (k < 1000) { + throw new Exception(); + } + // Because of the try/catch the Allocate node for this + // new will be in the loop while the Initialization + // node will be outside the loop. When loop + // unswitching happens, the Allocate node will be + // cloned and the results of both will be inputs to a + // Phi that will be between the Allocate nodes and the + // Initialization nodes. + Integer I = new Integer(k); + + I = m2(I, 0); + + int i = I.intValue(); + return i; + } catch(Exception e) { + } + } + } + + static public void main(String[] args) { + for (int i = 0; i < 5000; i++) { + m2(null, 1); + } + + int[] integers = { 2000 }; + for (int i = 0; i < 6000; i++) { + m(integers, (i%2) == 0); + } + int[] integers2 = { 1, 2, 3, 4, 5, 2000 }; + for (int i = 0; i < 10000; i++) { + m(integers2, (i%2) == 0); + } + } +} --- ./hotspot/test/compiler/membars/DekkerTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/membars/DekkerTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -25,9 +25,9 @@ * @test * @bug 8007898 * @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier(). - * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest - * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest - * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest * @author Martin Doerr martin DOT doerr AT sap DOT com * * Run 3 times since the failure is intermittent. --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/membars/TestMemBarAcquire.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestMemBarAcquire + * @bug 8048879 + * @summary "Tests optimization of MemBarAcquireNodes" + * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation TestMemBarAcquire + */ +public class TestMemBarAcquire { + private volatile static Object defaultObj = new Object(); + private Object obj; + + public TestMemBarAcquire(Object param) { + // Volatile load. MemBarAcquireNode is added after the + // load to prevent following loads from floating up past it. + // StoreNode is added to store result of load in 'obj'. + this.obj = defaultObj; + // Overrides 'obj' and therefore makes previous StoreNode + // and the corresponding LoadNode useless. However, the + // LoadNode is still connected to the MemBarAcquireNode + // that should now release the reference. + this.obj = param; + } + + public static void main(String[] args) throws Exception { + // Make sure TestMemBarAcquire::<init> is compiled + for (int i = 0; i < 100000; ++i) { + TestMemBarAcquire p = new TestMemBarAcquire(new Object()); + } + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/osr/TestOSRWithNonEmptyStack.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; + +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.Label; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import static jdk.internal.org.objectweb.asm.Opcodes.*; + +/** + * @test + * @bug 8051344 + * @summary Force OSR compilation with non-empty stack at the OSR entry point. 
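+ * (The generated TestCase.test() pushes 'this' on the operand stack before + * entering its loop, so when OSR triggers at the backbranch the expression + * stack is non-empty; see the bytecode comments in generateTestMethod() below.)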
+ * @compile -XDignore.symbol.file TestOSRWithNonEmptyStack.java + * @run main/othervm -XX:CompileOnly=TestCase.test TestOSRWithNonEmptyStack + */ +public class TestOSRWithNonEmptyStack extends ClassLoader { + private static final int CLASS_FILE_VERSION = 52; + private static final String CLASS_NAME = "TestCase"; + private static final String METHOD_NAME = "test"; + private static final int ITERATIONS = 1_000_000; + + private static byte[] generateTestClass() { + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES); + + cw.visit(TestOSRWithNonEmptyStack.CLASS_FILE_VERSION, ACC_PUBLIC, + TestOSRWithNonEmptyStack.CLASS_NAME, null, "java/lang/Object", + null); + + TestOSRWithNonEmptyStack.generateConstructor(cw); + TestOSRWithNonEmptyStack.generateTestMethod(cw); + + cw.visitEnd(); + return cw.toByteArray(); + } + + private static void generateConstructor(ClassWriter classWriter) { + MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC, "<init>", "()V", + null, null); + + mv.visitCode(); + + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", + false); + mv.visitInsn(RETURN); + + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + private static void generateTestMethod(ClassWriter classWriter) { + MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC, + TestOSRWithNonEmptyStack.METHOD_NAME, "()V", null, null); + Label osrEntryPoint = new Label(); + + mv.visitCode(); + // Push 'this' into stack before OSR entry point to bail out compilation + mv.visitVarInsn(ALOAD, 0); + // Setup loop counter + mv.visitInsn(ICONST_0); + mv.visitVarInsn(ISTORE, 1); + // Begin loop + mv.visitLabel(osrEntryPoint); + // Increment loop counter + mv.visitVarInsn(ILOAD, 1); + mv.visitInsn(ICONST_1); + mv.visitInsn(IADD); + // Duplicate it for loop condition check + mv.visitInsn(DUP); + mv.visitVarInsn(ISTORE, 1); + // Check loop condition + mv.visitLdcInsn(TestOSRWithNonEmptyStack.ITERATIONS); + mv.visitJumpInsn(IF_ICMPLT, osrEntryPoint); + // Pop 'this'. + mv.visitInsn(POP); + mv.visitInsn(RETURN); + + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + private void run() { + byte[] bytecode = TestOSRWithNonEmptyStack.generateTestClass(); + + try { + Class klass = defineClass(TestOSRWithNonEmptyStack.CLASS_NAME, + bytecode, 0, bytecode.length); + + Constructor ctor = klass.getConstructor(); + Method method = klass.getDeclaredMethod( + TestOSRWithNonEmptyStack.METHOD_NAME); + + Object testCase = ctor.newInstance(); + method.invoke(testCase); + } catch (Exception e) { + throw new RuntimeException( + "Test bug: generated class should be valid.", e); + } + } + + public static void main(String args[]) { + new TestOSRWithNonEmptyStack().run(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/osr/TestRangeCheck.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestRangeCheck + * @bug 8054883 + * @summary Tests that range check is not skipped + */ + +public class TestRangeCheck { + public static void main(String args[]) { + try { + test(); + throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown"); + } catch (ArrayIndexOutOfBoundsException e) { + System.out.println("Expected ArrayIndexOutOfBoundsException was thrown"); + } + } + + private static void test() { + int arr[] = new int[1]; + int result = 1; + + // provoke OSR compilation + for (int i = 0; i < Integer.MAX_VALUE; i++) { + } + + if (result > 0 && arr[~result] > 0) { + arr[~result] = 0; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/rangechecks/TestRangeCheckSmearing.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066103 + * @summary C2's range check smearing allows out of bound array accesses + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox /testlibrary/com/oracle/java/testlibrary + * @build TestRangeCheckSmearing + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform + * @run main/othervm -ea -Xmixed -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearing + * + */ + +import java.lang.annotation.*; +import java.lang.reflect.*; +import java.util.*; +import sun.hotspot.WhiteBox; +import sun.hotspot.code.NMethod; +import com.oracle.java.testlibrary.Platform; + +public class TestRangeCheckSmearing { + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + + @Retention(RetentionPolicy.RUNTIME) + @interface Args { int[] value(); } + + // first range check is i + max of all constants + @Args({0, 8}) + static int m1(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+9]; + if (allaccesses) { + res += array[i+8]; + res += array[i+7]; + res += array[i+6]; + res += array[i+5]; + res += array[i+4]; + res += array[i+3]; + res += array[i+2]; + res += array[i+1]; + } + return res; + } + + // first range check is i + min of all constants + @Args({0, -9}) + static int m2(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+1]; + if (allaccesses) { + res += array[i+2]; + res += array[i+3]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + // first range check is not i + min/max of all constants + @Args({0, 8}) + static int m3(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i+2]; + res += array[i+1]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -9}) + static int m4(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i+4]; + res += array[i+1]; + res += array[i+2]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -3}) + static int m5(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+2]; + if (allaccesses) { + res += array[i+1]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, 6}) + static int m6(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+4]; + if (allaccesses) { + res += array[i+2]; + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, 6}) + static int m7(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+2]; + res += array[i+4]; + if (allaccesses) { + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -3}) + static int m8(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+4]; + res += array[i+2]; + if (allaccesses) { + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({6, 15}) + static int m9(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i-2]; + res += array[i-1]; + res += array[i-4]; + res += array[i-5]; + res += array[i-6]; + } + return res; + } + + @Args({3, 
12}) + static int m10(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i-2]; + res += array[i-1]; + res += array[i-3]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + } + return res; + } + + @Args({3, -3}) + static int m11(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i-2]; + if (allaccesses) { + res += array[i+5]; + res += array[i+6]; + } + return res; + } + + @Args({3, 6}) + static int m12(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+6]; + if (allaccesses) { + res += array[i-2]; + res += array[i-3]; + } + return res; + } + + // check that identical range check is replaced by dominating one + // only when correct + @Args({0}) + static int m13(int[] array, int i, boolean ignore) { + int res = 0; + res += array[i+3]; + res += array[i+3]; + return res; + } + + @Args({2, 0}) + static int m14(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-2]; + res += array[i]; // If range check below were to be removed first this cannot be considered identical to first range check + res += array[i-1]; // range check removed so i-1 array access depends on previous check + + return res; + } + + static int[] m15_dummy = new int[10]; + @Args({2, 0}) + static int m15(int[] array, int i, boolean ignore) { + int res = 0; + res += array[i]; + + // When the loop is optimized out we don't want the + // array[i-1] access which is dependent on array[i]'s + // range check to become dependent on the identical range + // check above. + + int[] array2 = m15_dummy; + int j = 0; + for (; j < 10; j++); + if (j == 10) { + array2 = array; + } + + res += array2[i-2]; + res += array2[i]; + res += array2[i-1]; // range check removed so i-1 array access depends on previous check + + return res; + } + + @Args({2, 0}) + static int m16(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-1]; + res += array[i-1]; + res += array[i-2]; + + return res; + } + + @Args({2, 0}) + static int m17(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-2]; + res += array[i-2]; + res += array[i+2]; + res += array[i+2]; + res += array[i-1]; + res += array[i-1]; + + return res; + } + + static public void main(String[] args) { + if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) { + throw new AssertionError("Background compilation enabled"); + } + new TestRangeCheckSmearing().doTests(); + } + boolean success = true; + boolean exception = false; + final int[] array = new int[10]; + final HashMap<String, Method> tests = new HashMap<>(); + { + final Class TEST_PARAM_TYPES[] = { int[].class, int.class, boolean.class }; + for (Method m : this.getClass().getDeclaredMethods()) { + if (m.getName().matches("m[0-9]+")) { + assert(Modifier.isStatic(m.getModifiers())) : m; + assert(m.getReturnType() == int.class) : m; + assert(Arrays.equals(m.getParameterTypes(), TEST_PARAM_TYPES)) : m; + tests.put(m.getName(), m); + } + } + } + + void invokeTest(Method m, int[] array, int index, boolean z) { + try { + m.invoke(null, array, index, z); + } catch (ReflectiveOperationException roe) { + Throwable ex = roe.getCause(); + if (ex instanceof ArrayIndexOutOfBoundsException) + throw (ArrayIndexOutOfBoundsException) ex; + throw new AssertionError(roe); + } + } + + void doTest(String name) { + Method m = tests.get(name); + tests.remove(name); + int[] args = 
m.getAnnotation(Args.class).value(); + int index0 = args[0], index1; + boolean exceptionRequired = true; + if (args.length == 2) { + index1 = args[1]; + } else { + // no negative test for this one + assert(args.length == 1); + assert(name.equals("m13")); + exceptionRequired = false; + index1 = index0; + } + // Get the method compiled. + if (!WHITE_BOX.isMethodCompiled(m)) { + // If not, try to compile it with C2 + if(!WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION)) { + // C2 compiler not available, try to compile with C1 + WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE); + } + } + if (!WHITE_BOX.isMethodCompiled(m)) { + throw new RuntimeException(m + " not compiled"); + } + + // valid access + invokeTest(m, array, index0, true); + + if (!WHITE_BOX.isMethodCompiled(m)) { + throw new RuntimeException(m + " deoptimized on valid array access"); + } + + exception = false; + boolean test_success = true; + try { + invokeTest(m, array, index1, false); + } catch(ArrayIndexOutOfBoundsException aioob) { + exception = true; + System.out.println("ArrayIndexOutOfBoundsException thrown in "+name); + } + if (!exception) { + System.out.println("ArrayIndexOutOfBoundsException was not thrown in "+name); + } + + if (Platform.isServer()) { + if (exceptionRequired == WHITE_BOX.isMethodCompiled(m)) { + System.out.println((exceptionRequired?"Didn't deoptimize":"Deoptimized") + " in "+name); + test_success = false; + } + } + + if (exception != exceptionRequired) { + System.out.println((exceptionRequired?"exception required but not thrown":"exception thrown but not required") + " in "+name); + test_success = false; + } + + if (!test_success) { + success = false; + System.out.println("TEST FAILED: "+name); + } + + } + void doTests() { + doTest("m1"); + doTest("m2"); + doTest("m3"); + doTest("m4"); + doTest("m5"); + doTest("m6"); + doTest("m7"); + doTest("m8"); + doTest("m9"); + doTest("m10"); + doTest("m11"); + doTest("m12"); + doTest("m13"); + doTest("m14"); + doTest("m15"); + doTest("m16"); + doTest("m17"); + if (!success) { + throw new RuntimeException("Some tests failed"); + } + assert(tests.isEmpty()) : tests; + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/rangechecks/TestRangeCheckSmearingLoopOpts.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8048170 + * @summary Following range check smearing, range check cannot be replaced by dominating identical test. + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearingLoopOpts + * + */ +public class TestRangeCheckSmearingLoopOpts { + + static int dummy; + + static int m1(int[] array, int i) { + for (;;) { + for (;;) { + if (array[i] < 0) { // range check (i+0) dominates equivalent check below + break; + } + i++; + } + + // A control flow that stops IfNode::up_one_dom() + if ((i % 2)== 0) { + if ((array[i] % 2) == 0) { + dummy = i; + } + } + + // IfNode::Ideal will rewrite some range checks if Compile::allow_range_check_smearing + if (array[i-1] == 9) { // range check (i-1) unchanged + int res = array[i-3]; // range check (i-3) unchanged + res += array[i]; // range check (i+0) unchanged + res += array[i-2]; // removed redundant range check + // the previous access might be hoisted by + // PhaseIdealLoop::split_if_with_blocks_post because + // it appears to have the same guard, but it also + // depends on the previous guards + return res; + } + i++; + } + } + + static public void main(String[] args) { + int[] array = { 0, 1, 2, -3, 4, 5, -2, 7, 8, 9, -1 }; + for (int i = 0; i < 20000; i++) { + m1(array, 0); + } + array[0] = -1; + try { + m1(array, 0); + } catch(ArrayIndexOutOfBoundsException aioobe) {} + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/relocations/TestPrintRelocations.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8044538 + * @summary assert hit while printing relocations for jump table entries + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+PrintRelocations TestPrintRelocations + */ + +/** + * The test compiles all methods (-Xcomp) and prints their relocation + * entries (-XX:+PrintRelocations) to make sure the printing works. 
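+ * The test body itself is intentionally empty: with -Xcomp every method + * invoked during VM startup gets compiled, which is enough to exercise + * the relocation printing code.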
+ */ +public class TestPrintRelocations { + + static public void main(String[] args) { } +} --- ./hotspot/test/compiler/rtm/cli/TestRTMRetryCountOption.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestRTMRetryCountOption.java Wed Feb 04 12:14:39 2015 -0800 @@ -35,7 +35,7 @@ private static final String DEFAULT_VALUE = "5"; private TestRTMRetryCountOption() { - super(Boolean.TRUE::booleanValue, "RTMRetryCount", false, true, + super(Boolean.TRUE::booleanValue, "RTMRetryCount", false, false, TestRTMRetryCountOption.DEFAULT_VALUE, "0", "10", "100", "1000"); } --- ./hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java Wed Feb 04 12:14:39 2015 -0800 @@ -50,25 +50,15 @@ @Override public void runTestCases() throws Throwable { - String experimentalOptionError - = CommandLineOptionTest.getExperimentalOptionErrorMessage( - "UseRTMDeopt"); - // verify that option is experimental + // verify that option could be turned on CommandLineOptionTest.verifySameJVMStartup( - new String[] { experimentalOptionError }, null, ExitCode.FAIL, - "-XX:+UseRTMDeopt"); - // verify that option could be turned on - CommandLineOptionTest.verifySameJVMStartup(null, null, ExitCode.OK, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:+UseRTMDeopt"); + null, null, ExitCode.OK, "-XX:+UseRTMDeopt"); // verify that option could be turned off - CommandLineOptionTest.verifySameJVMStartup(null, null, ExitCode.OK, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:-UseRTMDeopt"); + CommandLineOptionTest.verifySameJVMStartup( + null, null, ExitCode.OK, "-XX:-UseRTMDeopt"); // verify default value CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", - TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS); + TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE); // verify default value CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE, --- ./hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java Wed Feb 04 12:14:39 2015 -0800 @@ -48,7 +48,7 @@ private TestUseRTMDeoptOptionOnUnsupportedConfig() { super(new NotPredicate(new AndPredicate(new SupportedCPU(), new SupportedVM())), - "UseRTMDeopt", true, true, + "UseRTMDeopt", true, false, TestUseRTMDeoptOptionOnUnsupportedConfig.DEFAULT_VALUE, "true"); } @@ -57,14 +57,11 @@ super.verifyJVMStartup(); // verify default value CommandLineOptionTest.verifyOptionValueForSameVM(optionName, - defaultValue, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS); + defaultValue); // verify that until RTMLocking is not used, value // will be set to default false. 
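// (That is: UseRTMDeopt takes effect only when UseRTMLocking is enabled, so even with -XX:+UseRTMDeopt on the command line the check below expects the flag to read back as its default value of false.)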
CommandLineOptionTest.verifyOptionValueForSameVM(optionName, - defaultValue, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:+UseRTMDeopt"); + defaultValue, "-XX:+UseRTMDeopt"); } public static void main(String args[]) throws Throwable { --- ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java Wed Feb 04 12:14:39 2015 -0800 @@ -51,15 +51,8 @@ @Override public void runTestCases() throws Throwable { String unrecongnizedOption - = CommandLineOptionTest.getUnrecognizedOptionErrorMessage( + = CommandLineOptionTest.getUnrecognizedOptionErrorMessage( "UseRTMLocking"); - String experimentalOptionError - = CommandLineOptionTest.getExperimentalOptionErrorMessage( - "UseRTMLocking"); - // verify that options is experimental - CommandLineOptionTest.verifySameJVMStartup( - new String[] { experimentalOptionError }, null, ExitCode.FAIL, - "-XX:+UseRTMLocking"); // verify that there are no warning or error in VM output CommandLineOptionTest.verifySameJVMStartup(null, new String[]{ @@ -67,7 +60,8 @@ unrecongnizedOption }, ExitCode.OK, CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:+UseRTMLocking"); + "-XX:+UseRTMLocking" + ); CommandLineOptionTest.verifySameJVMStartup(null, new String[]{ @@ -75,7 +69,8 @@ unrecongnizedOption }, ExitCode.OK, CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:-UseRTMLocking"); + "-XX:-UseRTMLocking" + ); // verify that UseRTMLocking is off by default CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking", TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE, --- ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java Wed Feb 04 12:14:39 2015 -0800 @@ -63,9 +63,7 @@ CommandLineOptionTest.verifySameJVMStartup( new String[] { errorMessage }, new String[] { unrecongnizedOption }, - ExitCode.FAIL, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:+UseRTMLocking"); + ExitCode.FAIL, "-XX:+UseRTMLocking"); // verify that we can pass -UseRTMLocking without // getting any error messages CommandLineOptionTest.verifySameJVMStartup( @@ -73,27 +71,20 @@ new String[]{ errorMessage, unrecongnizedOption - }, ExitCode.OK, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:-UseRTMLocking"); + }, ExitCode.OK, "-XX:-UseRTMLocking"); // verify that UseRTMLocking is false by default CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking", - TestUseRTMLockingOptionOnUnsupportedCPU.DEFAULT_VALUE, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS); + TestUseRTMLockingOptionOnUnsupportedCPU.DEFAULT_VALUE); } else { // verify that on non-x86 CPUs RTMLocking could not be used CommandLineOptionTest.verifySameJVMStartup( new String[] { unrecongnizedOption }, - null, ExitCode.FAIL, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:+UseRTMLocking"); + null, ExitCode.FAIL, "-XX:+UseRTMLocking"); CommandLineOptionTest.verifySameJVMStartup( new String[] { unrecongnizedOption }, - null, ExitCode.FAIL, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, - "-XX:-UseRTMLocking"); + null, ExitCode.FAIL, "-XX:-UseRTMLocking"); } } --- ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java Wed Feb 04 12:14:39 2015 
-0800 @@ -53,27 +53,17 @@ public void runTestCases() throws Throwable { String errorMessage = RTMGenericCommandLineOptionTest.RTM_UNSUPPORTED_VM_ERROR; - String experimentalOptionError - = CommandLineOptionTest.getExperimentalOptionErrorMessage( - "UseRTMLocking"); - // verify that options is experimental - CommandLineOptionTest.verifySameJVMStartup( - new String[] { experimentalOptionError }, null, ExitCode.FAIL, - "-XX:+UseRTMLocking"); // verify that we can't use +UseRTMLocking CommandLineOptionTest.verifySameJVMStartup( new String[] { errorMessage }, null, ExitCode.FAIL, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, "-XX:+UseRTMLocking"); // verify that we can turn it off CommandLineOptionTest.verifySameJVMStartup(null, new String[] { errorMessage }, ExitCode.OK, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, "-XX:-UseRTMLocking"); // verify that it is off by default CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking", - TestUseRTMLockingOptionOnUnsupportedVM.DEFAULT_VALUE, - CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS); + TestUseRTMLockingOptionOnUnsupportedVM.DEFAULT_VALUE); } public static void main(String args[]) throws Throwable { --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/startup/NumCompilerThreadsCheck.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8034775 + * @summary Ensures correct minimal number of compiler threads (provided by -XX:CICompilerCount=) + * @library /testlibrary + */ +import com.oracle.java.testlibrary.*; + +public class NumCompilerThreadsCheck { + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:CICompilerCount=-1"); + OutputAnalyzer out = new OutputAnalyzer(pb.start()); + + String expectedOutput = "CICompilerCount of -1 is invalid"; + out.shouldContain(expectedOutput); + + if (isZeroVm()) { + String expectedLowWaterMarkText = "must be at least 0"; + out.shouldContain(expectedLowWaterMarkText); + } + } + + private static boolean isZeroVm() { + String vmName = System.getProperty("java.vm.name"); + if (vmName == null) { + throw new RuntimeException("No VM name"); + } + if (vmName.toLowerCase().contains("zero")) { + return true; + } + return false; + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/testlibrary/intrinsics/Verifier.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
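NumCompilerThreadsCheck above uses the standard jtreg pattern for flag-validation tests: fork a child JVM with the flag under test and assert on its output. A minimal sketch of that pattern, using the same testlibrary entry points the test itself imports (the class name and the flag value here are illustrative only, not part of the patch):

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class FlagRejectionSketch {
    public static void main(String[] args) throws Exception {
        // Fork a child JVM with an out-of-range flag value; the child is
        // expected to fail during argument processing, before running anything.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:CICompilerCount=-1", "-version");
        OutputAnalyzer out = new OutputAnalyzer(pb.start());
        // Assert on the diagnostic the child printed before exiting.
        out.shouldContain("CICompilerCount of -1 is invalid");
    }
}

Forking keeps the flag under test from perturbing the parent jtreg VM, which is why nearly every startup test in this patch is written this way.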
+ */ +package intrinsics; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.util.Properties; + +public class Verifier { + enum VerificationStrategy { + VERIFY_STRONG_EQUALITY { + @Override + void verify(Properties expectedProperties, int fullMatchCnt, + int suspectCnt) { + int expectedCount = Integer.parseInt( + expectedProperties.getProperty( + Verifier.INTRINSIC_EXPECTED_COUNT_PROPERTY)); + String intrinsicID = expectedProperties.getProperty( + Verifier.INTRINSIC_NAME_PROPERTY); + + System.out.println("Intrinsic " + intrinsicID + + " verification, expected: " + expectedCount + + ", matched: " + fullMatchCnt + + ", suspected: " + suspectCnt); + if (expectedCount != fullMatchCnt) { + throw new RuntimeException( + "Unexpected count of intrinsic " + + intrinsicID + + " expected:" + expectedCount + + ", matched: " + fullMatchCnt + + ", suspected: " + suspectCnt); + } + } + }, + + VERIFY_INTRINSIC_USAGE { + @Override + void verify(Properties expectedProperties, int fullMatchCnt, + int suspectCnt) { + boolean isExpected = Boolean.parseBoolean( + expectedProperties.getProperty( + Verifier.INTRINSIC_IS_EXPECTED_PROPERTY)); + String intrinsicID = expectedProperties.getProperty( + Verifier.INTRINSIC_NAME_PROPERTY); + + System.out.println("Intrinsic " + intrinsicID + + " verification, is expected: " + isExpected + + ", matched: " + fullMatchCnt + + ", suspected: " + suspectCnt); + if ((fullMatchCnt == 0 && isExpected) + || (fullMatchCnt > 0 && !isExpected)) { + throw new RuntimeException( + "Unexpected count of intrinsic " + + intrinsicID + + " is expected:" + isExpected + + ", matched: " + fullMatchCnt + + ", suspected: " + suspectCnt); + } + } + }; + + void verify(Properties expectedProperties, int fullMathCnt, + int suspectCnt) { + throw new RuntimeException("Default strategy is not implemented."); + } + } + + public static final String PROPERTY_FILE_SUFFIX = ".verify.properties"; + public static final String INTRINSIC_NAME_PROPERTY = "intrinsic.name"; + public static final String INTRINSIC_IS_EXPECTED_PROPERTY + = "intrinsic.expected"; + public static final String INTRINSIC_EXPECTED_COUNT_PROPERTY + = "intrinsic.expectedCount"; + private static final String DEFAULT_STRATEGY + = VerificationStrategy.VERIFY_STRONG_EQUALITY.name(); + + public static void main(String[] args) throws Exception { + if (args.length == 0) { + throw new RuntimeException("Test bug, nothing to verify"); + } + for (String hsLogFile : args) { + verify(hsLogFile); + } + } + + private static void verify(String hsLogFile) throws Exception { + System.out.println("Verifying " + hsLogFile); + + Properties expectedProperties = new Properties(); + FileReader reader = new FileReader(hsLogFile + + Verifier.PROPERTY_FILE_SUFFIX); + expectedProperties.load(reader); + reader.close(); + + int fullMatchCnt = 0; + int suspectCnt = 0; + String intrinsicId = expectedProperties.getProperty( + Verifier.INTRINSIC_NAME_PROPERTY); + String prefix = " { + boolean isTiered = IntrinsicPredicates.WHITE_BOX.getBooleanVMFlag( + "TieredCompilation"); + long tieredMaxLevel = IntrinsicPredicates.WHITE_BOX.getIntxVMFlag( + "TieredStopAtLevel"); + boolean maxLevelIsReachable = (tieredMaxLevel + == IntrinsicPredicates.TIERED_MAX_LEVEL); + return Platform.isServer() && (!isTiered || maxLevelIsReachable); + }; + + public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE + = new CPUSpecificPredicate("sparc.*", new String[] { "sha1" }, + null); + + public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE + = new 
CPUSpecificPredicate("sparc.*", new String[] { "sha256" }, + null); + + public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE + = new CPUSpecificPredicate("sparc.*", new String[] { "sha512" }, + null); + + public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE + = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE, + new OrPredicate( + IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE, + IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE)); + + public static final BooleanSupplier SHA1_INTRINSICS_AVAILABLE + = new AndPredicate(new AndPredicate( + IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE, + IntrinsicPredicates.COMPILABLE_BY_C2), + IntrinsicPredicates.booleanOptionValue("UseSHA1Intrinsics")); + + public static final BooleanSupplier SHA256_INTRINSICS_AVAILABLE + = new AndPredicate(new AndPredicate( + IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE, + IntrinsicPredicates.COMPILABLE_BY_C2), + IntrinsicPredicates.booleanOptionValue("UseSHA256Intrinsics")); + + public static final BooleanSupplier SHA512_INTRINSICS_AVAILABLE + = new AndPredicate(new AndPredicate( + IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE, + IntrinsicPredicates.COMPILABLE_BY_C2), + IntrinsicPredicates.booleanOptionValue("UseSHA512Intrinsics")); + + private static BooleanSupplier booleanOptionValue(String option) { + return () -> IntrinsicPredicates.WHITE_BOX.getBooleanVMFlag(option); + } + + private IntrinsicPredicates() { + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/uncommontrap/TestDeoptOOM.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 6898462 + * @summary failed reallocations of scalar replaced objects during deoptimization causes crash + * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=exclude,TestDeoptOOM::main -XX:CompileCommand=exclude,TestDeoptOOM::m9_1 -Xmx128M TestDeoptOOM + * + */ + +public class TestDeoptOOM { + + long f1; + long f2; + long f3; + long f4; + long f5; + + static class LinkedList { + LinkedList l; + long[] array; + LinkedList(LinkedList l, int size) { + array = new long[size]; + this.l = l; + } + } + + static LinkedList ll; + + static void consume_all_memory() { + int size = 128 * 1024 * 1024; + while(size > 0) { + try { + while(true) { + ll = new LinkedList(ll, size); + } + } catch(OutOfMemoryError oom) { + } + size = size / 2; + } + } + + static void free_memory() { + ll = null; + } + + static TestDeoptOOM m1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m1"); + } + return null; + } + + static TestDeoptOOM m2_1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m2_1"); + } + return null; + } + + static TestDeoptOOM m2(boolean deopt) { + try { + return m2_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m2"); + } + return null; + } + + static TestDeoptOOM m3_3(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_3"); + } + return null; + } + + static boolean m3_2(boolean deopt) { + try { + return m3_3(deopt) != null; + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_2"); + } + return false; + } + + static TestDeoptOOM m3_1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (m3_2(deopt)) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_1"); + } + return null; + } + + static TestDeoptOOM m3(boolean deopt) { + try { + return m3_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3"); + } + return null; + } + + static TestDeoptOOM m4(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + tdoom.f1 = 1l; + tdoom.f2 = 2l; + tdoom.f3 = 3l; + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m4"); + } + return null; + } + + static TestDeoptOOM m5(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + synchronized(tdoom) { + if (deopt) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m5"); + } + return null; + } + + synchronized TestDeoptOOM m6_1(boolean deopt) { + if (deopt) { + return this; + } + return null; + } + + static TestDeoptOOM m6(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + return tdoom.m6_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m6"); + } + return null; + } + + static TestDeoptOOM m7_1(boolean deopt, Object lock) { + try { + synchronized(lock) { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + 
System.out.println("OOM caught in m7_1"); + } + return null; + } + + static TestDeoptOOM m7(boolean deopt, Object lock) { + try { + return m7_1(deopt, lock); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m7"); + } + return null; + } + + static class A { + long f1; + long f2; + long f3; + long f4; + long f5; + } + + static class B { + long f1; + long f2; + long f3; + long f4; + long f5; + + A a; + } + + static B m8(boolean deopt) { + try { + A a = new A(); + B b = new B(); + b.a = a; + if (deopt) { + return b; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m8"); + } + return null; + } + + static void m9_1(int i) { + if (i > 90000) { + consume_all_memory(); + } + } + + static TestDeoptOOM m9() { + try { + for (int i = 0; i < 100000; i++) { + TestDeoptOOM tdoom = new TestDeoptOOM(); + m9_1(i); + if (i > 90000) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m1"); + } + return null; + } + + public static void main(String[] args) { + for (int i = 0; i < 20000; i++) { + m1(false); + } + + consume_all_memory(); + + try { + m1(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main " + oom.getMessage()); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m2(false); + } + + consume_all_memory(); + + try { + m2(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m3(false); + } + + consume_all_memory(); + + try { + m3(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m4(false); + } + + consume_all_memory(); + + try { + m4(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m5(false); + } + + consume_all_memory(); + + try { + m5(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m6(false); + } + + consume_all_memory(); + + try { + m6(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + final Object lock = new Object(); + + for (int i = 0; i < 20000; i++) { + m7(false, lock); + } + + consume_all_memory(); + + try { + m7(true, lock); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + Thread thread = new Thread() { + public void run() { + System.out.println("Acquiring lock"); + synchronized(lock) { + System.out.println("Lock acquired"); + } + System.out.println("Lock released"); + } + }; + thread.start(); + try { + thread.join(); + } catch(InterruptedException ie) { + } + + for (int i = 0; i < 20000; i++) { + m8(false); + } + + consume_all_memory(); + + try { + m8(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + try { + m9(); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/uncommontrap/TraceDeoptimizationNoRealloc.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 
2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8067144 + * @summary -XX:+TraceDeoptimization tries to print realloc'ed objects even when there are none + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceDeoptimization TraceDeoptimizationNoRealloc + * + */ + +public class TraceDeoptimizationNoRealloc { + + static void m(boolean some_condition) { + if (some_condition) { + return; + } + } + + + static public void main(String[] args) { + for (int i = 0; i < 20000; i++) { + m(false); + } + m(true); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/unsafe/UnsafeRaw.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
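The UnsafeRaw test that follows stresses the compiler's matching of raw-address expressions such as base + (index << 2). The arithmetic fact the test leans on is that an int shift result is sign-extended to 64 bits before being added to a long base, so an int index and a long index of equal value must produce the same address. A self-contained illustration of just that arithmetic (no Unsafe involved; the constants are illustrative):

public class AddressArithmeticSketch {
    public static void main(String[] args) {
        long base = 0x1000_0000L;
        int intIndex = -4;
        long longIndex = -4L;
        // The int shift result (-16) is sign-extended to long before the add,
        // so both forms land 16 bytes below base at a 4-byte element scale.
        long a1 = base + (intIndex << 2);
        long a2 = base + (longIndex << 2);
        System.out.println(a1 == a2);        // true
        System.out.println(a1 == base - 16); // true
    }
}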
+ */ + +/* + * @test + * @bug 8058744 + * @summary Invalid pattern-matching of address computations in raw unsafe + * @library /testlibrary + * @run main/othervm -Xbatch UnsafeRaw + */ + +import com.oracle.java.testlibrary.Utils; +import java.util.Random; + +public class UnsafeRaw { + public static class Tests { + public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index << 2)); + } + public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index << 2)); + } + public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index >> 2)); + } + public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index >>> 2)); + } + public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index >> 2)); + } + public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index >>> 2)); + } + public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception { + int idx4 = 0x12345678; + return unsafe.getInt(base + idx4); + } + public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception { + long idx5 = 0x1234567890abcdefL; + return unsafe.getInt(base + idx5); + } + public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index * 4)); + } + public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index * 4)); + } + public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index * 16)); + } + public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index * 16)); + } + } + + public static void main(String[] args) throws Exception { + sun.misc.Unsafe unsafe = Utils.getUnsafe(); + final int array_size = 128; + final int element_size = 4; + final int magic = 0x12345678; + + Random rnd = new Random(); + + long array = unsafe.allocateMemory(array_size * element_size); // 128 ints + long addr = array + array_size * element_size / 2; // something in the middle to work with + unsafe.putInt(addr, magic); + for (int j = 0; j < 100000; j++) { + if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception(); + { + long idx1 = rnd.nextLong(); + long addr1 = addr - (idx1 << 2); + if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception(); + } + { + long idx2 = rnd.nextLong(); + long addr2 = addr - (idx2 >> 2); + if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception(); + } + { + long idx3 = rnd.nextLong(); + long addr3 = addr - (idx3 >>> 2); + if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception(); + } + { + long idx4 = 0x12345678; + long addr4 = addr - idx4; + if (Tests.int_const_12345678_index(unsafe, addr4) != 
magic) throw new Exception(); + } + { + long idx5 = 0x1234567890abcdefL; + long addr5 = addr - idx5; + if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception(); + } + { + int idx6 = rnd.nextInt(); + long addr6 = addr - (idx6 >> 2); + if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception(); + } + { + int idx7 = rnd.nextInt(); + long addr7 = addr - (idx7 >>> 2); + if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception(); + } + { + int idx8 = rnd.nextInt(); + long addr8 = addr - (idx8 * 16); + if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception(); + } + { + long idx9 = rnd.nextLong(); + long addr9 = addr - (idx9 * 16); + if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception(); + } + } + } +} --- ./hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -72,9 +72,9 @@ /** Flag for verbose output, true if {@code -Dverbose} specified */ protected static final boolean IS_VERBOSE = System.getProperty("verbose") != null; - /** count of invocation to triger compilation */ + /** invocation count to trigger compilation */ protected static final int THRESHOLD; - /** count of invocation to triger OSR compilation */ + /** invocation count to trigger OSR compilation */ protected static final long BACKEDGE_THRESHOLD; /** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */ protected static final String MODE = System.getProperty("java.vm.info"); @@ -206,7 +206,6 @@ * is compiled, or if {@linkplain #method} has zero * compilation level. */ - protected final void checkNotCompiled(int compLevel) { if (WHITE_BOX.isMethodQueuedForCompilation(method)) { throw new RuntimeException(method + " must not be in queue"); @@ -227,20 +226,30 @@ * compilation level. */ protected final void checkNotCompiled() { + checkNotCompiled(true); + checkNotCompiled(false); + } + + /** + * Checks, that {@linkplain #method} is not (OSR-)compiled. + * + * @param isOsr Check for OSR compilation if true + * @throws RuntimeException if {@linkplain #method} is in compiler queue or + * is compiled, or if {@linkplain #method} has zero + * compilation level. + */ + protected final void checkNotCompiled(boolean isOsr) { + waitBackgroundCompilation(); if (WHITE_BOX.isMethodQueuedForCompilation(method)) { throw new RuntimeException(method + " must not be in queue"); } - if (WHITE_BOX.isMethodCompiled(method, false)) { - throw new RuntimeException(method + " must be not compiled"); + if (WHITE_BOX.isMethodCompiled(method, isOsr)) { + throw new RuntimeException(method + " must not be " + + (isOsr ? "osr_" : "") + "compiled"); } - if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) { - throw new RuntimeException(method + " comp_level must be == 0"); - } - if (WHITE_BOX.isMethodCompiled(method, true)) { - throw new RuntimeException(method + " must be not osr_compiled"); - } - if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) { - throw new RuntimeException(method + " osr_comp_level must be == 0"); + if (WHITE_BOX.getMethodCompilationLevel(method, isOsr) != 0) { + throw new RuntimeException(method + (isOsr ? " osr_" : " ") + + "comp_level must be == 0"); } } @@ -306,12 +315,21 @@ * Waits for completion of background compilation of {@linkplain #method}. 
*/ protected final void waitBackgroundCompilation() { + waitBackgroundCompilation(method); + } + + /** + * Waits for completion of background compilation of the given executable. + * + * @param executable Executable + */ + protected static final void waitBackgroundCompilation(Executable executable) { if (!BACKGROUND_COMPILATION) { return; } final Object obj = new Object(); for (int i = 0; i < 10 - && WHITE_BOX.isMethodQueuedForCompilation(method); ++i) { + && WHITE_BOX.isMethodQueuedForCompilation(executable); ++i) { synchronized (obj) { try { obj.wait(1000); @@ -425,14 +443,14 @@ /** constructor test case */ CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE, false), /** method test case */ - METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false), + METHOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false), /** static method test case */ STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE, false), /** OSR constructor test case */ OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR, Helper.OSR_CONSTRUCTOR_CALLABLE, true), /** OSR method test case */ - OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true), + OSR_METHOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true), /** OSR static method test case */ OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true); @@ -494,7 +512,7 @@ = new Callable() { @Override public Integer call() throws Exception { - return new Helper(null).hashCode(); + return new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode(); } }; @@ -504,7 +522,7 @@ @Override public Integer call() throws Exception { - return helper.osrMethod(); + return helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD); } }; @@ -512,7 +530,7 @@ = new Callable() { @Override public Integer call() throws Exception { - return osrStaticMethod(); + return osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD); } }; @@ -532,25 +550,24 @@ } try { OSR_CONSTRUCTOR = Helper.class.getDeclaredConstructor( - Object.class); + Object.class, long.class); } catch (NoSuchMethodException | SecurityException e) { throw new RuntimeException( - "exception on getting method Helper.(Object)", e); + "exception on getting method Helper.(Object, long)", e); } METHOD = getMethod("method"); STATIC = getMethod("staticMethod"); - OSR_METHOD = getMethod("osrMethod"); - OSR_STATIC = getMethod("osrStaticMethod"); + OSR_METHOD = getMethod("osrMethod", long.class); + OSR_STATIC = getMethod("osrStaticMethod", long.class); } - private static Method getMethod(String name) { + private static Method getMethod(String name, Class... parameterTypes) { try { - return Helper.class.getDeclaredMethod(name); + return Helper.class.getDeclaredMethod(name, parameterTypes); } catch (NoSuchMethodException | SecurityException e) { throw new RuntimeException( "exception on getting method Helper." + name, e); } - } private static int staticMethod() { @@ -561,17 +578,84 @@ return 42; } - private static int osrStaticMethod() { + /** + * Deoptimizes all non-osr versions of the given executable after + * compilation finished. 
+ * + * @param e Executable + * @throws Exception + */ + private static void waitAndDeoptimize(Executable e) { + CompilerWhiteBoxTest.waitBackgroundCompilation(e); + if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) { + throw new RuntimeException(e + " must not be in queue"); + } + // Deoptimize non-osr versions of executable + WhiteBox.getWhiteBox().deoptimizeMethod(e, false); + } + + /** + * Executes the method multiple times to make sure we have + * enough profiling information before triggering an OSR + * compilation. Otherwise the C2 compiler may add uncommon traps. + * + * @param m Method to be executed + * @return Number of times the method was executed + * @throws Exception + */ + private static int warmup(Method m) throws Exception { + waitAndDeoptimize(m); + Helper helper = new Helper(); int result = 0; - for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) { + result += (int)m.invoke(helper, 1); + } + // Wait to make sure OSR compilation is not blocked by + // non-OSR compilation in the compile queue + CompilerWhiteBoxTest.waitBackgroundCompilation(m); + return result; + } + + /** + * Executes the constructor multiple times to make sure we + * have enough profiling information before triggering an OSR + * compilation. Otherwise the C2 compiler may add uncommon traps. + * + * @param c Constructor to be executed + * @return Number of times the constructor was executed + * @throws Exception + */ + private static int warmup(Constructor c) throws Exception { + waitAndDeoptimize(c); + int result = 0; + for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) { + result += c.newInstance(null, 1).hashCode(); + } + // Wait to make sure OSR compilation is not blocked by + // non-OSR compilation in the compile queue + CompilerWhiteBoxTest.waitBackgroundCompilation(c); + return result; + } + + private static int osrStaticMethod(long limit) throws Exception { + int result = 0; + if (limit != 1) { + result = warmup(OSR_STATIC); + } + // Trigger osr compilation + for (long i = 0; i < limit; ++i) { result += staticMethod(); } return result; } - private int osrMethod() { + private int osrMethod(long limit) throws Exception { int result = 0; - for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + if (limit != 1) { + result = warmup(OSR_METHOD); + } + // Trigger osr compilation + for (long i = 0; i < limit; ++i) { result += method(); } return result; @@ -585,9 +669,13 @@ } // for OSR constructor test case - private Helper(Object o) { + private Helper(Object o, long limit) throws Exception { int result = 0; - for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + if (limit != 1) { + result = warmup(OSR_CONSTRUCTOR); + } + // Trigger osr compilation + for (long i = 0; i < limit; ++i) { result += method(); } x = result; --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/compiler/whitebox/DeoptimizeMultipleOSRTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
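The warmup helpers added to CompilerWhiteBoxTest above encode a general rule: trigger an OSR compilation only after the method has gathered profile data at a small iteration count, otherwise C2 compiles with too little information and plants uncommon traps that later deoptimize. Stripped of the WhiteBox plumbing, the shape of the pattern looks like the following sketch (the iteration counts are illustrative stand-ins, not the real thresholds):

public class OsrWarmupSketch {
    static int counter;

    static void work(long limit) {
        // A long-running loop back-edge is what triggers an OSR compilation.
        for (long i = 0; i < limit; i++) {
            counter++;
        }
    }

    public static void main(String[] args) {
        // Warmup: many short invocations gather branch and type profiles first...
        for (int i = 0; i < 20_000; i++) {
            work(1);
        }
        // ...then one long loop provokes the OSR compilation on a hot back-edge.
        work(150_000);
        System.out.println(counter);
    }
}

Passing the limit as a parameter, as the patched Helper methods do, lets the same loop body serve both as the warmup (limit == 1) and as the OSR trigger.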
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import sun.hotspot.WhiteBox; +import java.lang.reflect.Executable; +import java.lang.reflect.Method; + +/* + * @test DeoptimizeMultipleOSRTest + * @bug 8061817 + * @library /testlibrary /testlibrary/whitebox + * @build DeoptimizeMultipleOSRTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,DeoptimizeMultipleOSRTest::triggerOSR DeoptimizeMultipleOSRTest + * @summary testing of WB::deoptimizeMethod() + */ +public class DeoptimizeMultipleOSRTest { + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + private static final long BACKEDGE_THRESHOLD = 150000; + private Method method; + private int counter = 0; + + public static void main(String[] args) throws Exception { + DeoptimizeMultipleOSRTest test = new DeoptimizeMultipleOSRTest(); + test.test(); + } + + /** + * Triggers two different OSR compilations for the same method and + * checks if WhiteBox.deoptimizeMethod() deoptimizes both. + * + * @throws Exception + */ + public void test() throws Exception { + method = DeoptimizeMultipleOSRTest.class.getDeclaredMethod("triggerOSR", boolean.class, long.class); + // Trigger two OSR compiled versions + triggerOSR(true, BACKEDGE_THRESHOLD); + triggerOSR(false, BACKEDGE_THRESHOLD); + // Wait for compilation + CompilerWhiteBoxTest.waitBackgroundCompilation(method); + // Deoptimize + WHITE_BOX.deoptimizeMethod(method, true); + if (WHITE_BOX.isMethodCompiled(method, true)) { + throw new AssertionError("Not all OSR compiled versions were deoptimized"); + } + } + + /** + * Triggers OSR compilations by executing loops. + * + * @param first Determines which loop to execute + * @param limit The number of loop iterations + */ + public void triggerOSR(boolean first, long limit) { + if (limit != 1) { + // Warmup method to avoid uncommon traps + for (int i = 0; i < limit; ++i) { + triggerOSR(first, 1); + } + CompilerWhiteBoxTest.waitBackgroundCompilation(method); + } + if (first) { + // Trigger OSR compilation 1 + for (int i = 0; i < limit; ++i) { + counter++; + } + } else { + // Trigger OSR compilation 2 + for (int i = 0; i < limit; ++i) { + counter++; + } + } + } +} --- ./hotspot/test/compiler/whitebox/IsMethodCompilableTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/whitebox/IsMethodCompilableTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -28,7 +28,7 @@ * @build IsMethodCompilableTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform - * @run main/othervm/timeout=2400 -Xbootclasspath/a:. 
-Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:-TieredCompilation -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest * @summary testing of WB::isMethodCompilable() * @author igor.ignatyev@oracle.com */ --- ./hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -131,14 +131,15 @@ throw new RuntimeException(method + " is not compilable after clearMethodState()"); } - + // Make method not (OSR-)compilable (depending on testCase.isOsr()) makeNotCompilable(); if (isCompilable()) { throw new RuntimeException(method + " must be not compilable"); } - + // Try to (OSR-)compile method compile(); - checkNotCompiled(); + // Method should not be (OSR-)compiled + checkNotCompiled(testCase.isOsr()); if (isCompilable()) { throw new RuntimeException(method + " must be not compilable"); } --- ./hotspot/test/gc/arguments/TestDynMaxHeapFreeRatio.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/gc/arguments/TestDynMaxHeapFreeRatio.java Wed Feb 04 12:14:39 2015 -0800 @@ -21,6 +21,11 @@ * questions. */ +import static com.oracle.java.testlibrary.Asserts.assertEQ; +import static com.oracle.java.testlibrary.Asserts.assertFalse; +import static com.oracle.java.testlibrary.Asserts.assertTrue; +import com.oracle.java.testlibrary.DynamicVMOption; + /** * @test TestDynMaxHeapFreeRatio * @bug 8028391 @@ -33,32 +38,45 @@ * @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMaxHeapFreeRatio * @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMaxHeapFreeRatio */ -import com.oracle.java.testlibrary.TestDynamicVMOption; -import com.oracle.java.testlibrary.DynamicVMOptionChecker; - -public class TestDynMaxHeapFreeRatio extends TestDynamicVMOption { - - public static final String MinFreeRatioFlagName = "MinHeapFreeRatio"; - public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio"; - - public TestDynMaxHeapFreeRatio() { - super(MaxFreeRatioFlagName); - } - - public void test() { - - int minHeapFreeValue = DynamicVMOptionChecker.getIntValue(MinFreeRatioFlagName); - System.out.println(MinFreeRatioFlagName + " = " + minHeapFreeValue); - - testPercentageValues(); - - checkInvalidValue(Integer.toString(minHeapFreeValue - 1)); - checkValidValue(Integer.toString(minHeapFreeValue)); - checkValidValue("100"); - } +public class TestDynMaxHeapFreeRatio { public static void main(String args[]) throws Exception { - new TestDynMaxHeapFreeRatio().test(); + + // low boundary value + int minValue = DynamicVMOption.getInt("MinHeapFreeRatio"); + System.out.println("MinHeapFreeRatio= " + minValue); + + String badValues[] = { + null, + "", + "not a number", + "8.5", "-0.01", + Integer.toString(Integer.MIN_VALUE), + Integer.toString(Integer.MAX_VALUE), + Integer.toString(minValue - 1), + "-1024", "-1", "101", "1997" + }; + + String goodValues[] = { + Integer.toString(minValue), + Integer.toString(minValue + 1), + Integer.toString((minValue + 100) / 2), + "99", "100" + }; + + DynamicVMOption option = new DynamicVMOption("MaxHeapFreeRatio"); + + assertTrue(option.isWriteable(), "Option " + option.name + + " is expected to be writable"); + + for (String 
v : badValues) { + assertFalse(option.isValidValue(v), + "'" + v + "' is expected to be illegal for flag " + option.name); + } + for (String v : goodValues) { + option.setValue(v); + String newValue = option.getValue(); + assertEQ(v, newValue); + } } - } --- ./hotspot/test/gc/arguments/TestDynMinHeapFreeRatio.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/gc/arguments/TestDynMinHeapFreeRatio.java Wed Feb 04 12:14:39 2015 -0800 @@ -33,30 +33,52 @@ * @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMinHeapFreeRatio * @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMinHeapFreeRatio */ -import com.oracle.java.testlibrary.TestDynamicVMOption; -import com.oracle.java.testlibrary.DynamicVMOptionChecker; +import static com.oracle.java.testlibrary.Asserts.assertEQ; +import static com.oracle.java.testlibrary.Asserts.assertFalse; +import static com.oracle.java.testlibrary.Asserts.assertTrue; +import com.oracle.java.testlibrary.DynamicVMOption; -public class TestDynMinHeapFreeRatio extends TestDynamicVMOption { - - public static final String MinFreeRatioFlagName = "MinHeapFreeRatio"; - public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio"; - - public TestDynMinHeapFreeRatio() { - super(MinFreeRatioFlagName); - } - - public void test() { - int maxHeapFreeValue = DynamicVMOptionChecker.getIntValue(MaxFreeRatioFlagName); - System.out.println(MaxFreeRatioFlagName + " = " + maxHeapFreeValue); - - testPercentageValues(); - - checkInvalidValue(Integer.toString(maxHeapFreeValue + 1)); - checkValidValue(Integer.toString(maxHeapFreeValue)); - checkValidValue("0"); - } +public class TestDynMinHeapFreeRatio { public static void main(String args[]) throws Exception { - new TestDynMinHeapFreeRatio().test(); + + // high boundary value + int maxValue = DynamicVMOption.getInt("MaxHeapFreeRatio"); + System.out.println("MaxHeapFreeRatio= " + maxValue); + + String badValues[] = { + null, + "", + "not a number", + "8.5", "-0.01", + Integer.toString(Integer.MIN_VALUE), + Integer.toString(Integer.MAX_VALUE), + Integer.toString(maxValue + 1), + "-1024", "-1", "101", "1997" + }; + + String goodValues[] = { + Integer.toString(maxValue), + Integer.toString(maxValue - 1), + Integer.toString(maxValue / 2), + "0", "1" + }; + + // option under test + DynamicVMOption option = new DynamicVMOption("MinHeapFreeRatio"); + + assertTrue(option.isWriteable(), "Option " + option.name + + " is expected to be writable"); + + for (String v : badValues) { + assertFalse(option.isValidValue(v), + "'" + v + "' is expected to be illegal for flag " + option.name); + } + + for (String v : goodValues) { + option.setValue(v); + String newValue = option.getValue(); + assertEQ(v, newValue); + } } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/arguments/TestG1ConcRefinementThreads.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,97 @@ +/* +* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. 
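TestDynMinHeapFreeRatio and TestDynMaxHeapFreeRatio above depend on the heap-free-ratio flags being manageable, i.e. writable in a running VM. The testlibrary's DynamicVMOption does the plumbing there; outside the testlibrary, the same effect is available through the standard HotSpotDiagnosticMXBean, as in this sketch (the flag name mirrors the tests; setVMOption throws IllegalArgumentException for a non-writable flag or an invalid value):

import com.sun.management.HotSpotDiagnosticMXBean;
import java.lang.management.ManagementFactory;

public class ManageableFlagSketch {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
                ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        // Read the current value, then write a new one at runtime.
        String before = bean.getVMOption("MinHeapFreeRatio").getValue();
        bean.setVMOption("MinHeapFreeRatio", "10");
        String after = bean.getVMOption("MinHeapFreeRatio").getValue();
        System.out.println(before + " -> " + after);
    }
}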
See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestG1ConcRefinementThreads + * @key gc + * @bug 8047976 + * @summary Tests argument processing for G1ConcRefinementThreads + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; +import java.util.*; +import java.util.regex.*; + +public class TestG1ConcRefinementThreads { + + static final int AUTO_SELECT_THREADS_COUNT = 0; + static final int PASSED_THREADS_COUNT = 11; + + public static void main(String args[]) throws Exception { + // default case + runG1ConcRefinementThreadsTest( + new String[]{}, // automatically selected + AUTO_SELECT_THREADS_COUNT /* use default setting */); + + // zero setting case + runG1ConcRefinementThreadsTest( + new String[]{"-XX:G1ConcRefinementThreads=0"}, // automatically selected + AUTO_SELECT_THREADS_COUNT /* set to zero */); + + // non-zero setting case + runG1ConcRefinementThreadsTest( + new String[]{"-XX:G1ConcRefinementThreads="+Integer.toString(PASSED_THREADS_COUNT)}, + PASSED_THREADS_COUNT); + } + + private static void runG1ConcRefinementThreadsTest(String[] passedOpts, + int expectedValue) throws Exception { + List<String> vmOpts = new ArrayList<>(); + if (passedOpts.length > 0) { + Collections.addAll(vmOpts, passedOpts); + } + Collections.addAll(vmOpts, "-XX:+UseG1GC", "-XX:+PrintFlagsFinal", "-version"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldHaveExitValue(0); + String stdout = output.getStdout(); + checkG1ConcRefinementThreadsConsistency(stdout, expectedValue); + } + + private static void checkG1ConcRefinementThreadsConsistency(String output, int expectedValue) { + int actualValue = getIntValue("G1ConcRefinementThreads", output); + + if (expectedValue == 0) { + // If expectedValue is automatically selected, set it to the same value as ParallelGCThreads. + expectedValue = getIntValue("ParallelGCThreads", output); + } + + if (expectedValue != actualValue) { + throw new RuntimeException( + "Actual G1ConcRefinementThreads(" + Integer.toString(actualValue) + + ") is not equal to expected value(" + Integer.toString(expectedValue) + ")"); + } + } + + public static int getIntValue(String flag, String where) { + Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where); + if (!m.find()) { + throw new RuntimeException("Could not find value for flag " + flag + " in output string"); + } + String match = m.group(); + return Integer.parseInt(match.substring(match.lastIndexOf(" ") + 1, match.length())); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/arguments/TestSurvivorAlignmentInBytesOption.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import com.oracle.java.testlibrary.ExitCode; +import com.oracle.java.testlibrary.cli.CommandLineOptionTest; + +/** + * @test + * @bug 8031323 + * @summary Verify SurvivorAlignmentInBytes option processing. + * @library /testlibrary + * @requires vm.opt.SurvivorAlignmentInBytes == null + * & vm.opt.ObjectAlignmentInBytes == null + * & vm.opt.UnlockExperimentalVMOptions == null + * & (vm.opt.IgnoreUnrecognizedVMOptions == null + * | vm.opt.IgnoreUnrecognizedVMOptions == "false") + * @run main TestSurvivorAlignmentInBytesOption + */ +public class TestSurvivorAlignmentInBytesOption { + public static void main(String args[]) throws Throwable { + String optionName = "SurvivorAlignmentInBytes"; + String unlockExperimentalVMOpts = "UnlockExperimentalVMOptions"; + String optionIsExperimental + = CommandLineOptionTest.getExperimentalOptionErrorMessage( + optionName); + String valueIsTooSmall = ".*SurvivorAlignmentInBytes=.*must be greater" + + " than ObjectAlignmentInBytes.*"; + String mustBePowerOf2 = ".*SurvivorAlignmentInBytes=.*must be " + + "power of 2.*"; + + // Verify that without -XX:+UnlockExperimentalVMOptions usage of + // SurvivorAlignmentInBytes option will cause JVM startup failure + // with the warning message saying that the option is experimental. + CommandLineOptionTest.verifyJVMStartup( + new String[]{optionIsExperimental}, null, ExitCode.FAIL, false, + "-XX:-UnlockExperimentalVMOptions", + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, false), + CommandLineOptionTest.prepareNumericFlag(optionName, 64)); + + // Verify that with -XX:+UnlockExperimentalVMOptions passed to JVM + // usage of SurvivorAlignmentInBytes option won't cause JVM startup + // failure. + CommandLineOptionTest.verifyJVMStartup( + null, new String[]{optionIsExperimental}, ExitCode.OK, false, + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, true), + CommandLineOptionTest.prepareNumericFlag(optionName, 64)); + + // Verify that if specified SurvivorAlignmentInBytes is lower than + // ObjectAlignmentInBytes, then the JVM startup will fail with + // appropriate error message. + CommandLineOptionTest.verifyJVMStartup( + new String[]{valueIsTooSmall}, null, ExitCode.FAIL, false, + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, true), + CommandLineOptionTest.prepareNumericFlag(optionName, 2)); + + // Verify that if specified SurvivorAlignmentInBytes value is not + // a power of 2 then the JVM startup will fail with appropriate error + // message.
+ CommandLineOptionTest.verifyJVMStartup( + new String[]{mustBePowerOf2}, null, ExitCode.FAIL, false, + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, true), + CommandLineOptionTest.prepareNumericFlag(optionName, 127)); + + // Verify that if SurvivorAlignmentInBytes has correct value, then + // the JVM will be started without errors. + CommandLineOptionTest.verifyJVMStartup( + null, new String[]{".*SurvivorAlignmentInBytes.*"}, + ExitCode.OK, false, + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, true), + CommandLineOptionTest.prepareNumericFlag(optionName, 128)); + + // Verify that we can setup different SurvivorAlignmentInBytes values. + for (int alignment = 32; alignment <= 128; alignment *= 2) { + CommandLineOptionTest.verifyOptionValue(optionName, + Integer.toString(alignment), + CommandLineOptionTest.prepareBooleanFlag( + unlockExperimentalVMOpts, true), + CommandLineOptionTest.prepareNumericFlag( + optionName, alignment)); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @key gc + * @bug 8049831 + * @library /testlibrary /testlibrary/whitebox + * @build TestCMSClassUnloadingEnabledHWM + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run driver TestCMSClassUnloadingEnabledHWM + * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated. + */ + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Arrays; +import sun.hotspot.WhiteBox; + +public class TestCMSClassUnloadingEnabledHWM { + private static long MetaspaceSize = 32 * 1024 * 1024; + private static long YoungGenSize = 32 * 1024 * 1024; + + private static OutputAnalyzer run(boolean enableUnloading) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/a:.", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+WhiteBoxAPI", + "-Xmx128m", + "-XX:CMSMaxAbortablePrecleanTime=1", + "-XX:CMSWaitDuration=50", + "-XX:MetaspaceSize=" + MetaspaceSize, + "-Xmn" + YoungGenSize, + "-XX:+UseConcMarkSweepGC", + "-XX:" + (enableUnloading ? 
"+" : "-") + "CMSClassUnloadingEnabled", + "-XX:+PrintHeapAtGC", + "-XX:+PrintGCDetails", + "-XX:+PrintGCTimeStamps", + TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(), + "" + MetaspaceSize); + return new OutputAnalyzer(pb.start()); + } + + public static OutputAnalyzer runWithCMSClassUnloading() throws Exception { + return run(true); + } + + public static OutputAnalyzer runWithoutCMSClassUnloading() throws Exception { + return run(false); + } + + public static void testWithoutCMSClassUnloading() throws Exception { + // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle. + OutputAnalyzer out = runWithoutCMSClassUnloading(); + + out.shouldMatch(".*Full GC.*"); + out.shouldNotMatch(".*CMS Initial Mark.*"); + } + + public static void testWithCMSClassUnloading() throws Exception { + // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC. + OutputAnalyzer out = runWithCMSClassUnloading(); + + out.shouldMatch(".*CMS Initial Mark.*"); + out.shouldNotMatch(".*Full GC.*"); + } + + public static void main(String args[]) throws Exception { + testWithCMSClassUnloading(); + testWithoutCMSClassUnloading(); + } + + public static class AllocateBeyondMetaspaceSize { + public static void main(String [] args) throws Exception { + if (args.length != 1) { + throw new IllegalArgumentException("Usage: "); + } + + WhiteBox wb = WhiteBox.getWhiteBox(); + + // Allocate past the MetaspaceSize limit. + long metaspaceSize = Long.parseLong(args[0]); + long allocationBeyondMetaspaceSize = metaspaceSize * 2; + long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize); + + // Wait for at least one GC to occur. The caller will parse the log files produced. + GarbageCollectorMXBean cmsGCBean = getCMSGCBean(); + while (cmsGCBean.getCollectionCount() == 0) { + Thread.sleep(100); + } + + wb.freeMetaspace(null, metaspace, metaspace); + } + + private static GarbageCollectorMXBean getCMSGCBean() { + for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) { + if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) { + return gcBean; + } + } + return null; + } + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @key gc + * @bug 8049831 + * @library /testlibrary /testlibrary/whitebox + * @build TestG1ClassUnloadingHWM + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run driver TestG1ClassUnloadingHWM + * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated. + */ + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; +import java.util.ArrayList; +import java.util.Arrays; +import sun.hotspot.WhiteBox; + +public class TestG1ClassUnloadingHWM { + private static long MetaspaceSize = 32 * 1024 * 1024; + private static long YoungGenSize = 32 * 1024 * 1024; + + private static OutputAnalyzer run(boolean enableUnloading) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/a:.", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+WhiteBoxAPI", + "-XX:MetaspaceSize=" + MetaspaceSize, + "-Xmn" + YoungGenSize, + "-XX:+UseG1GC", + "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark", + "-XX:+PrintHeapAtGC", + "-XX:+PrintGCDetails", + TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(), + "" + MetaspaceSize, + "" + YoungGenSize); + return new OutputAnalyzer(pb.start()); + } + + public static OutputAnalyzer runWithG1ClassUnloading() throws Exception { + return run(true); + } + + public static OutputAnalyzer runWithoutG1ClassUnloading() throws Exception { + return run(false); + } + + public static void testWithoutG1ClassUnloading() throws Exception { + // -XX:-ClassUnloadingWithConcurrentMark is used, so we expect a full GC instead of a concurrent cycle. + OutputAnalyzer out = runWithoutG1ClassUnloading(); + + out.shouldMatch(".*Full GC.*"); + out.shouldNotMatch(".*initial-mark.*"); + } + + public static void testWithG1ClassUnloading() throws Exception { + // -XX:+ClassUnloadingWithConcurrentMark is used, so we expect a concurrent cycle instead of a full GC. + OutputAnalyzer out = runWithG1ClassUnloading(); + + out.shouldMatch(".*initial-mark.*"); + out.shouldNotMatch(".*Full GC.*"); + } + + public static void main(String args[]) throws Exception { + testWithG1ClassUnloading(); + testWithoutG1ClassUnloading(); + } + + public static class AllocateBeyondMetaspaceSize { + public static Object dummy; + + public static void main(String [] args) throws Exception { + if (args.length != 2) { + throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>"); + } + + WhiteBox wb = WhiteBox.getWhiteBox(); + + // Allocate past the MetaspaceSize limit + long metaspaceSize = Long.parseLong(args[0]); + long allocationBeyondMetaspaceSize = metaspaceSize * 2; + long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize); + + long youngGenSize = Long.parseLong(args[1]); + triggerYoungGCs(youngGenSize); + + wb.freeMetaspace(null, metaspace, metaspace); + } + + public static void triggerYoungGCs(long youngGenSize) { + long approxAllocSize = 32 * 1024; + long numAllocations = 2 * youngGenSize / approxAllocSize; + + for (long i = 0; i < numAllocations; i++) { + dummy = new byte[(int)approxAllocSize]; + } + } + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/concurrentMarkSweep/DisableResizePLAB.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,44 @@ +/* +* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test DisableResizePLAB + * @key gc + * @bug 8060467 + * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com + * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize + * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB + */ + +public class DisableResizePLAB { + public static void main(String args[]) throws Exception { + Object garbage[] = new Object[1_000]; + for (int i = 0; i < garbage.length; i++) { + garbage[i] = new byte[0]; + } + long startTime = System.currentTimeMillis(); + while (System.currentTimeMillis() - startTime < 10_000) { + Object o = new byte[1024]; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestEagerReclaimHumongousRegions.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestEagerReclaimHumongousRegions + * @bug 8027959 + * @summary Test to make sure that eager reclaim of humongous objects works. We simply try to fill + * up the heap with humongous objects that should be eagerly reclaimable to avoid Full GC.
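 + * The test parses the -XX:+PrintGC output of the child VM and asserts that fewer than 10 "Full GC" lines appear, since eagerly reclaimed humongous regions should be freed during the young collections triggered by genGarbage().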
+ * @key gc + * @library /testlibrary + */ + +import java.util.regex.Pattern; +import java.util.regex.Matcher; +import java.util.LinkedList; + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.Asserts; + +class ReclaimRegionFast { + public static final int M = 1024*1024; + + public static LinkedList<Object> garbageList = new LinkedList<>(); + + public static void genGarbage() { + for (int i = 0; i < 32*1024; i++) { + garbageList.add(new int[100]); + } + garbageList.clear(); + } + + // A large object referenced by a static. + static int[] filler = new int[10 * M]; + + public static void main(String[] args) { + + int[] large = new int[M]; + + Object ref_from_stack = large; + + for (int i = 0; i < 100; i++) { + // A large object that will be reclaimed eagerly. + large = new int[6*M]; + genGarbage(); + // Make sure that the compiler cannot completely remove + // the allocation of the large object until here. + System.out.println(large); + } + + // Keep the reference to the first object alive. + System.out.println(ref_from_stack); + } +} + +public class TestEagerReclaimHumongousRegions { + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UseG1GC", + "-Xms128M", + "-Xmx128M", + "-Xmn16M", + "-XX:+PrintGC", + ReclaimRegionFast.class.getName()); + + Pattern p = Pattern.compile("Full GC"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + int found = 0; + Matcher m = p.matcher(output.getStdout()); + while (m.find()) { found++; } + System.out.println("Issued " + found + " Full GCs"); + Asserts.assertLT(found, 10, "Found that " + found + " Full GCs were issued. This is larger than the bound. Eager reclaim seems to not work at all"); + + output.shouldHaveExitValue(0); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestEagerReclaimHumongousRegions2.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestEagerReclaimHumongousRegions2 + * @bug 8051973 + * @summary Test to make sure that eager reclaim of humongous objects correctly clears + * mark bitmaps at reclaim.
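 + * The child VM runs with -XX:InitiatingHeapOccupancyPercent=0 and -XX:ConcGCThreads=1 so that concurrent marking is in progress while humongous arrays are reclaimed, and with -XX:+G1VerifyBitmaps (passed under -XX:+IgnoreUnrecognizedVMOptions, as it is a develop-only flag) to detect stale mark bitmap bits.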
+ * @key gc + * @library /testlibrary + */ + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.Random; + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; + +// An object that has a few references to other instances to slow down marking. +class ObjectWithSomeRefs { + public ObjectWithSomeRefs other1; + public ObjectWithSomeRefs other2; + public ObjectWithSomeRefs other3; + public ObjectWithSomeRefs other4; +} + +class ReclaimRegionFast { + public static final long MAX_MILLIS_FOR_RUN = 50 * 1000; // The maximum runtime for the actual test. + + public static final int M = 1024*1024; + + public static LinkedList<Object> garbageList = new LinkedList<>(); + + public static void genGarbage(Object large) { + for (int i = 0; i < 64*1024; i++) { + Object[] garbage = new Object[50]; + garbage[0] = large; + garbageList.add(garbage); + } + garbageList.clear(); + } + + public static ArrayList<ObjectWithSomeRefs> longList = new ArrayList<>(); + + public static void main(String[] args) { + + for (int i = 0; i < 16*1024; i++) { + longList.add(new ObjectWithSomeRefs()); + } + + Random rnd = new Random(); + for (int i = 0; i < longList.size(); i++) { + int len = longList.size(); + longList.get(i).other1 = longList.get(rnd.nextInt(len)); + longList.get(i).other2 = longList.get(rnd.nextInt(len)); + longList.get(i).other3 = longList.get(rnd.nextInt(len)); + longList.get(i).other4 = longList.get(rnd.nextInt(len)); + } + + int[] large1 = new int[M]; + int[] large2 = null; + int[] large3 = null; + int[] large4 = null; + + Object ref_from_stack = large1; + + long start_millis = System.currentTimeMillis(); + + for (int i = 0; i < 20; i++) { + long current_millis = System.currentTimeMillis(); + if ((current_millis - start_millis) > MAX_MILLIS_FOR_RUN) { + System.out.println("Finishing test because maximum runtime exceeded"); + break; + } + // A set of large objects that will be reclaimed eagerly - and hopefully marked. + large1 = new int[M - 20]; + large2 = new int[M - 20]; + large3 = new int[M - 20]; + large4 = new int[M - 20]; + genGarbage(large1); + // Make sure that the compiler cannot completely remove + // the allocation of the large object until here. + System.out.println(large1 + " " + large2 + " " + large3 + " " + large4); + } + + // Keep the reference to the first object alive. + System.out.println(ref_from_stack); + } +} + +public class TestEagerReclaimHumongousRegions2 { + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UseG1GC", + "-Xms128M", + "-Xmx128M", + "-Xmn2M", + "-XX:G1HeapRegionSize=1M", + "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as many initial marks as possible. + "-XX:+PrintGC", + "-XX:+VerifyAfterGC", + "-XX:ConcGCThreads=1", // Want to make marking as slow as possible. + "-XX:+IgnoreUnrecognizedVMOptions", // G1VerifyBitmaps is develop only. + "-XX:+G1VerifyBitmaps", + ReclaimRegionFast.class.getName()); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } +} + --- ./hotspot/test/gc/g1/TestGCLogMessages.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/gc/g1/TestGCLogMessages.java Wed Feb 04 12:14:39 2015 -0800 @@ -22,8 +22,8 @@ */ /* - * @test TestPrintGCDetails - * @bug 8035406 8027295 8035398 + * @test TestGCLogMessages + * @bug 8035406 8027295 8035398 8019342 8027959 * @summary Ensure that the PrintGCDetails output for a minor GC with G1 * includes the expected necessary messages.
* @key gc @@ -48,10 +48,13 @@ OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldNotContain("[Redirty Cards"); + output.shouldNotContain("[Parallel Redirty"); + output.shouldNotContain("[Redirtied Cards"); output.shouldNotContain("[Code Root Purge"); output.shouldNotContain("[String Dedup Fixup"); output.shouldNotContain("[Young Free CSet"); output.shouldNotContain("[Non-Young Free CSet"); + output.shouldNotContain("[Humongous Reclaim"); output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", @@ -63,10 +66,16 @@ output = new OutputAnalyzer(pb.start()); output.shouldContain("[Redirty Cards"); + output.shouldNotContain("[Parallel Redirty"); + output.shouldNotContain("[Redirtied Cards"); output.shouldContain("[Code Root Purge"); output.shouldContain("[String Dedup Fixup"); output.shouldNotContain("[Young Free CSet"); output.shouldNotContain("[Non-Young Free CSet"); + output.shouldContain("[Humongous Reclaim"); + output.shouldNotContain("[Humongous Total"); + output.shouldNotContain("[Humongous Candidate"); + output.shouldNotContain("[Humongous Reclaimed"); output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", @@ -80,16 +89,16 @@ output = new OutputAnalyzer(pb.start()); output.shouldContain("[Redirty Cards"); + output.shouldContain("[Parallel Redirty"); + output.shouldContain("[Redirtied Cards"); output.shouldContain("[Code Root Purge"); output.shouldContain("[String Dedup Fixup"); output.shouldContain("[Young Free CSet"); output.shouldContain("[Non-Young Free CSet"); - - // also check evacuation failure messages once - output.shouldNotContain("[Evacuation Failure"); - output.shouldNotContain("[Recalculate Used"); - output.shouldNotContain("[Remove Self Forwards"); - output.shouldNotContain("[Restore RemSet"); + output.shouldContain("[Humongous Reclaim"); + output.shouldContain("[Humongous Total"); + output.shouldContain("[Humongous Candidate"); + output.shouldContain("[Humongous Reclaimed"); output.shouldHaveExitValue(0); } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestHumongousShrinkHeap.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test TestHumongousShrinkHeap + * @bug 8036025 8056043 + * @summary Verify that heap shrinks after GC in the presence of fragmentation + * due to humongous objects + * @library /testlibrary + * @run main/othervm -XX:-ExplicitGCInvokesConcurrent -XX:MinHeapFreeRatio=10 + * -XX:MaxHeapFreeRatio=12 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc + * TestHumongousShrinkHeap + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.util.ArrayList; +import java.util.List; +import sun.management.ManagementFactoryHelper; +import static com.oracle.java.testlibrary.Asserts.*; + +public class TestHumongousShrinkHeap { + + public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio"; + public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio"; + + private static final List<List<byte[]>> garbage = new ArrayList<>(); + private static final int REGION_SIZE = 1024 * 1024; // 1M + private static final int LISTS_COUNT = 10; + private static final int HUMON_SIZE = Math.round(.9f * REGION_SIZE); + private static final long AVAILABLE_MEMORY + = Runtime.getRuntime().freeMemory(); + private static final int HUMON_COUNT + = (int) ((AVAILABLE_MEMORY / HUMON_SIZE) + / LISTS_COUNT); + + + public static void main(String[] args) { + System.out.format("Running with %s max heap size. " + + "Will allocate humongous object of %s size %d times.%n", + MemoryUsagePrinter.humanReadableByteCount(AVAILABLE_MEMORY, false), + MemoryUsagePrinter.humanReadableByteCount(HUMON_SIZE, false), + HUMON_COUNT + ); + new TestHumongousShrinkHeap().test(); + } + + private final void test() { + System.gc(); + MemoryUsagePrinter.printMemoryUsage("init"); + + allocate(); + MemoryUsagePrinter.printMemoryUsage("allocated"); + MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + + free(); + MemoryUsagePrinter.printMemoryUsage("free"); + MemoryUsage muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + + assertLessThan(muFree.getCommitted(), muFull.getCommitted(), String.format( + "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n" + + "%s = %s%n%s = %s", + MIN_FREE_RATIO_FLAG_NAME, + ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(), + MAX_FREE_RATIO_FLAG_NAME, + ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue() + )); + } + + private void allocate() { + + for (int i = 0; i < LISTS_COUNT; i++) { + List<byte[]> stuff = new ArrayList<>(); + allocateList(stuff, HUMON_COUNT, HUMON_SIZE); + MemoryUsagePrinter.printMemoryUsage("allocate #" + (i+1)); + garbage.add(stuff); + } + } + + private void free() { + // do not free the last list + garbage.subList(0, garbage.size() - 1).clear(); + + // do not free the last element of the last list + List<byte[]> stuff = garbage.get(garbage.size() - 1); + stuff.subList(0, stuff.size() - 1).clear(); + System.gc(); + } + + private static void allocateList(List<byte[]> garbage, int count, int size) { + for (int i = 0; i < count; i++) { + garbage.add(new byte[size]); + } + } +} + +/** + * Prints memory usage to standard output + */ +class MemoryUsagePrinter { + + public static String humanReadableByteCount(long bytes, boolean si) { + int unit = si ? 1000 : 1024; + if (bytes < unit) { + return bytes + " B"; + } + int exp = (int) (Math.log(bytes) / Math.log(unit)); + String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ?
"" : "i"); + return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre); + } + + public static void printMemoryUsage(String label) { + MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted(); + System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n", + label, + humanReadableByteCount(memusage.getInit(), false), + humanReadableByteCount(memusage.getUsed(), false), + humanReadableByteCount(memusage.getCommitted(), false), + freeratio * 100 + ); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import static com.oracle.java.testlibrary.Asserts.assertLessThanOrEqual; +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.Platform; +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.Utils; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.text.DecimalFormat; +import java.text.DecimalFormatSymbols; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import sun.misc.Unsafe; + +public class TestShrinkAuxiliaryData { + + private final static String[] initialOpts = new String[]{ + "-XX:MinHeapFreeRatio=10", + "-XX:MaxHeapFreeRatio=11", + "-XX:+UseG1GC", + "-XX:G1HeapRegionSize=1m", + "-XX:-ExplicitGCInvokesConcurrent", + "-XX:+PrintGCDetails" + }; + + private final int RSetCacheSize; + + protected TestShrinkAuxiliaryData(int RSetCacheSize) { + this.RSetCacheSize = RSetCacheSize; + } + + protected void test() throws Exception { + ArrayList<String> vmOpts = new ArrayList<>(); + Collections.addAll(vmOpts, initialOpts); + + int maxCacheSize = Math.max(0, Math.min(31, getMaxCacheSize())); + if (maxCacheSize < RSetCacheSize) { + System.out.format("Skipping test for %d cache size due to max cache size %d", + RSetCacheSize, maxCacheSize + ); + return; + } + + printTestInfo(maxCacheSize); + + vmOpts.add("-XX:G1ConcRSLogCacheSize=" + RSetCacheSize); + + vmOpts.addAll(Arrays.asList(Utils.getFilteredTestJavaOpts( + ShrinkAuxiliaryDataTest.prohibitedVmOptions))); + + // on 32-bit VMs ObjectAlignmentInBytes is not an option + if (Platform.is32bit()) { + ArrayList<String> vmOptsWithoutAlign = new ArrayList<>(vmOpts); + vmOptsWithoutAlign.add(ShrinkAuxiliaryDataTest.class.getName()); + performTest(vmOptsWithoutAlign); + return; + } + + for (int alignment = 3; alignment <= 8; alignment++) { + ArrayList<String> vmOptsWithAlign = new ArrayList<>(vmOpts); + vmOptsWithAlign.add("-XX:ObjectAlignmentInBytes=" + + (int) Math.pow(2, alignment)); + vmOptsWithAlign.add(ShrinkAuxiliaryDataTest.class.getName()); + + performTest(vmOptsWithAlign); + } + } + + private void performTest(List<String> opts) throws Exception { + ProcessBuilder pb + = ProcessTools.createJavaProcessBuilder( + opts.toArray(new String[opts.size()]) + ); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } + + private void printTestInfo(int maxCacheSize) { + + DecimalFormat grouped = new DecimalFormat("000,000"); + DecimalFormatSymbols formatSymbols = grouped.getDecimalFormatSymbols(); + formatSymbols.setGroupingSeparator(' '); + grouped.setDecimalFormatSymbols(formatSymbols); + + System.out.format("Test will use %s bytes of memory of %s available%n" + + "Available memory is %s with %d bytes pointer size - can save %s pointers%n" + + "Max cache size: 2^%d = %s elements%n", + grouped.format(ShrinkAuxiliaryDataTest.getMemoryUsedByTest()), + grouped.format(Runtime.getRuntime().freeMemory()), + grouped.format(Runtime.getRuntime().freeMemory() + - ShrinkAuxiliaryDataTest.getMemoryUsedByTest()), + Unsafe.ADDRESS_SIZE, + grouped.format((Runtime.getRuntime().freeMemory() + - ShrinkAuxiliaryDataTest.getMemoryUsedByTest()) + / Unsafe.ADDRESS_SIZE), + maxCacheSize, + grouped.format((int) Math.pow(2, maxCacheSize)) + ); + } + + /** + * Detects maximum possible size of G1ConcRSLogCacheSize available for + * current process based on maximum available process memory size + * + * @return power of two + */ + private
static int getMaxCacheSize() { + long availableMemory = Runtime.getRuntime().freeMemory() + - ShrinkAuxiliaryDataTest.getMemoryUsedByTest() - 1L; + if (availableMemory <= 0) { + return 0; + } + long availablePointersCount = availableMemory / Unsafe.ADDRESS_SIZE; + return (63 - (int) Long.numberOfLeadingZeros(availablePointersCount)); + } + + static class ShrinkAuxiliaryDataTest { + + public static void main(String[] args) throws IOException { + int iterateCount = DEFAULT_ITERATION_COUNT; + + if (args.length > 0) { + try { + iterateCount = Integer.parseInt(args[0]); + } catch (NumberFormatException e) { + //num_iterate remains default + } + } + + new ShrinkAuxiliaryDataTest().test(iterateCount); + } + + class GarbageObject { + + private final List<byte[]> payload = new ArrayList<>(); + private final List<GarbageObject> ref = new LinkedList<>(); + + public GarbageObject(int size) { + payload.add(new byte[size]); + } + + public void addRef(GarbageObject g) { + ref.add(g); + } + + public void mutate() { + if (!payload.isEmpty() && payload.get(0).length > 0) { + payload.get(0)[0] = (byte) (Math.random() * Byte.MAX_VALUE); + } + } + } + + private final List<GarbageObject> garbage = new ArrayList<>(); + + public void test(int num_iterate) throws IOException { + + allocate(); + link(); + mutate(); + deallocate(); + + MemoryUsage muBeforeHeap + = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + MemoryUsage muBeforeNonHeap + = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage(); + + for (int i = 0; i < num_iterate; i++) { + allocate(); + link(); + mutate(); + deallocate(); + } + + System.gc(); + MemoryUsage muAfterHeap + = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + MemoryUsage muAfterNonHeap + = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage(); + + assertLessThanOrEqual(muAfterHeap.getCommitted(), muBeforeHeap.getCommitted(), + String.format("heap decommit failed - after > before: %d > %d", + muAfterHeap.getCommitted(), muBeforeHeap.getCommitted() + ) + ); + + if (muAfterHeap.getCommitted() < muBeforeHeap.getCommitted()) { + assertLessThanOrEqual(muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted(), + String.format("non-heap decommit failed - after > before: %d > %d", + muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted() + ) + ); + } + } + + private void allocate() { + for (int r = 0; r < REGIONS_TO_ALLOCATE; r++) { + for (int i = 0; i < NUM_OBJECTS_PER_REGION; i++) { + GarbageObject g = new GarbageObject(REGION_SIZE + / NUM_OBJECTS_PER_REGION); + garbage.add(g); + } + } + } + + /** + * Iterate through all allocated objects, and link to objects in other + * regions + */ + private void link() { + for (int ig = 0; ig < garbage.size(); ig++) { + int regionNumber = ig / NUM_OBJECTS_PER_REGION; + + for (int i = 0; i < NUM_LINKS; i++) { + int regionToLink; + do { + regionToLink = (int) (Math.random() + * REGIONS_TO_ALLOCATE); + } while (regionToLink == regionNumber); + + // get random garbage object from random region + garbage.get(ig).addRef(garbage.get(regionToLink + * NUM_OBJECTS_PER_REGION + (int) (Math.random() + * NUM_OBJECTS_PER_REGION))); + } + } + } + + private void mutate() { + for (int ig = 0; ig < garbage.size(); ig++) { + garbage.get(ig).mutate(); + } + } + + private void deallocate() { + garbage.clear(); + System.gc(); + } + + static long getMemoryUsedByTest() { + return REGIONS_TO_ALLOCATE * REGION_SIZE; + } + + private static final int REGION_SIZE = 1024 * 1024; + private static final int DEFAULT_ITERATION_COUNT = 1; // iterate main scenario + private static final
int REGIONS_TO_ALLOCATE = 5; + private static final int NUM_OBJECTS_PER_REGION = 10; + private static final int NUM_LINKS = 20; // how many links to create for each object + + private static final String[] prohibitedVmOptions = { + // remove this once the @requires option is available + "-XX:\\+UseParallelGC", + "-XX:\\+UseSerialGC", + "-XX:\\+UseConcMarkSweepGC", + "-XX:\\+UseParallelOldGC", + "-XX:\\+UseParNewGC", + "-Xconcgc", + "-Xincgc" + }; + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData00.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestShrinkAuxiliaryData00 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData00 + * @run driver/timeout=720 TestShrinkAuxiliaryData00 + */ +public class TestShrinkAuxiliaryData00 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(0).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/** + * @test TestShrinkAuxiliaryData05 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData05 + * @run driver/timeout=720 TestShrinkAuxiliaryData05 + */ +public class TestShrinkAuxiliaryData05 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(5).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestShrinkAuxiliaryData10 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData10 + * @run driver/timeout=720 TestShrinkAuxiliaryData10 + */ +public class TestShrinkAuxiliaryData10 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(10).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test TestShrinkAuxiliaryData15 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData15 + * @run driver/timeout=720 TestShrinkAuxiliaryData15 + */ +public class TestShrinkAuxiliaryData15 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(15).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestShrinkAuxiliaryData20 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData20 + * @run driver/timeout=720 TestShrinkAuxiliaryData20 + */ +public class TestShrinkAuxiliaryData20 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(20).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test TestShrinkAuxiliaryData25 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData25 + * @run driver/timeout=720 TestShrinkAuxiliaryData25 + */ +public class TestShrinkAuxiliaryData25 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(25).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestShrinkAuxiliaryData30 + * @bug 8038423 + * @summary Checks that decommitment occurs for JVM with different + * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values + * @library /testlibrary /testlibrary/whitebox + * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData30 + * @run driver/timeout=720 TestShrinkAuxiliaryData30 + */ +public class TestShrinkAuxiliaryData30 { + + public static void main(String[] args) throws Exception { + new TestShrinkAuxiliaryData(30).test(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/g1/TestShrinkDefragmentedHeap.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test TestShrinkDefragmentedHeap + * @bug 8038423 + * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects + * 1. allocate small objects mixed with humongous ones + * "ssssHssssHssssHssssHssssHssssHssssH" + * 2. release all allocated objects except the last humongous one + * "..................................H" + * 3. invoke gc and check that memory returned to the system (amount of committed memory got down) + * + * @library /testlibrary + */ +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.util.ArrayList; +import java.util.List; +import sun.management.ManagementFactoryHelper; +import static com.oracle.java.testlibrary.Asserts.*; +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestShrinkDefragmentedHeap { + // Since we store all the small objects, they become old and old regions are also allocated at the bottom of the heap + // together with humongous regions. So if there are a lot of old regions in the lower part of the heap, + // the humongous regions will be allocated in the upper part of the heap anyway. + // To avoid this the Eden needs to be big enough to fit all the small objects. + private static final int INITIAL_HEAP_SIZE = 200 * 1024 * 1024; + private static final int MINIMAL_YOUNG_SIZE = 190 * 1024 * 1024; + private static final int REGION_SIZE = 1 * 1024 * 1024; + + public static void main(String[] args) throws Throwable { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:InitialHeapSize=" + INITIAL_HEAP_SIZE, + "-Xmn" + MINIMAL_YOUNG_SIZE, + "-XX:MinHeapFreeRatio=10", + "-XX:MaxHeapFreeRatio=11", + "-XX:+UseG1GC", + "-XX:G1HeapRegionSize=" + REGION_SIZE, + "-XX:-ExplicitGCInvokesConcurrent", + "-verbose:gc", + GCTest.class.getName() + ); + + OutputAnalyzer output = ProcessTools.executeProcess(pb); + output.shouldHaveExitValue(0); + } + + static class GCTest { + + private static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio"; + private static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio"; + private static final String NEW_SIZE_FLAG_NAME = "NewSize"; + + private static final ArrayList<ArrayList<byte[]>> garbage = new ArrayList<>(); + + private static final int SMALL_OBJS_SIZE = 10 * 1024; // 10kB + private static final int SMALL_OBJS_COUNT = MINIMAL_YOUNG_SIZE / (SMALL_OBJS_SIZE-1); + private static final int ALLOCATE_COUNT = 3; + // try to put all humongous objects into the gap between min young size and initial heap size + // to avoid implicit GCs + private static final int HUMONG_OBJS_SIZE = (int) Math.max( + (INITIAL_HEAP_SIZE - MINIMAL_YOUNG_SIZE) / ALLOCATE_COUNT / 4, + REGION_SIZE * 1.1 + ); + + private static final long initialHeapSize = getHeapMemoryUsage().getUsed(); + + public static void main(String[] args) throws InterruptedException { + new GCTest().test(); + } + + private void test() throws InterruptedException { + MemoryUsagePrinter.printMemoryUsage("init"); + + allocate(); + System.gc(); + MemoryUsage muFull = getHeapMemoryUsage(); + MemoryUsagePrinter.printMemoryUsage("allocated"); + + free(); + //Thread.sleep(1000); // sleep before measuring due to lags in JMX + MemoryUsage muFree = getHeapMemoryUsage(); + MemoryUsagePrinter.printMemoryUsage("free"); + + assertLessThan(muFree.getCommitted(), muFull.getCommitted(), prepareMessageCommittedIsNotLess()); + } + + private void allocate() { + System.out.format("Will allocate objects of small size = %s
and humongous size = %s", + MemoryUsagePrinter.humanReadableByteCount(SMALL_OBJS_SIZE, false), + MemoryUsagePrinter.humanReadableByteCount(HUMONG_OBJS_SIZE, false) + ); + + for (int i = 0; i < ALLOCATE_COUNT; i++) { + ArrayList<byte[]> stuff = new ArrayList<>(); + allocateList(stuff, SMALL_OBJS_COUNT / ALLOCATE_COUNT, SMALL_OBJS_SIZE); + garbage.add(stuff); + + ArrayList<byte[]> humongousStuff = new ArrayList<>(); + allocateList(humongousStuff, 4, HUMONG_OBJS_SIZE); + garbage.add(humongousStuff); + } + } + + private void free() { + // do not free the last list + garbage.subList(0, garbage.size() - 1).clear(); + + // do not free the last element of the last list + ArrayList<byte[]> stuff = garbage.get(garbage.size() - 1); + if (stuff.size() > 1) { + stuff.subList(0, stuff.size() - 1).clear(); + } + System.gc(); + } + + private String prepareMessageCommittedIsNotLess() { + return String.format( + "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n" + + "%s = %s%n%s = %s", + MIN_FREE_RATIO_FLAG_NAME, + ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(), + MAX_FREE_RATIO_FLAG_NAME, + ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue() + ); + } + + private static void allocateList(List<byte[]> garbage, int count, int size) { + for (int i = 0; i < count; i++) { + garbage.add(new byte[size]); + } + } + } + + static MemoryUsage getHeapMemoryUsage() { + return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + } + + /** + * Prints memory usage to standard output + */ + static class MemoryUsagePrinter { + + public static String humanReadableByteCount(long bytes, boolean si) { + int unit = si ? 1000 : 1024; + if (bytes < unit) { + return bytes + " B"; + } + int exp = (int) (Math.log(bytes) / Math.log(unit)); + String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i"); + return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre); + } + + public static void printMemoryUsage(String label) { + MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted(); + System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n", + label, + humanReadableByteCount(memusage.getInit(), false), + humanReadableByteCount(memusage.getUsed(), false), + humanReadableByteCount(memusage.getCommitted(), false), + freeratio * 100 + ); + } + } +} --- ./hotspot/test/gc/g1/TestSummarizeRSetStatsThreads.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/gc/g1/TestSummarizeRSetStatsThreads.java Wed Feb 04 12:14:39 2015 -0800 @@ -53,8 +53,8 @@ // a zero in refinement thread numbers indicates that the value in ParallelGCThreads should be used. // Additionally use at least one thread. - int expectedNumRefinementThreads = refinementThreads == 0 ?
workerThreads : refinementThreads; - expectedNumRefinementThreads = Math.max(1, expectedNumRefinementThreads); + int expectedNumRefinementThreads = refinementThreads; + // create the pattern made up of n copies of a floating point number pattern String numberPattern = String.format("%0" + expectedNumRefinementThreads + "d", 0) .replace("0", "\\s+\\d+\\.\\d+"); @@ -73,9 +73,9 @@ return; } // different valid combinations of number of refinement and gc worker threads - runTest(0, 0); - runTest(0, 5); - runTest(5, 0); + runTest(1, 1); + runTest(1, 5); + runTest(5, 1); runTest(10, 10); runTest(1, 2); runTest(4, 3); --- ./hotspot/test/gc/g1/TestSummarizeRSetStatsTools.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/gc/g1/TestSummarizeRSetStatsTools.java Wed Feb 04 12:14:39 2015 -0800 @@ -88,7 +88,6 @@ ArrayList<String> finalargs = new ArrayList<>(); String[] defaultArgs = new String[] { "-XX:+UseG1GC", - "-XX:+UseCompressedOops", "-Xmn4m", "-Xmx20m", "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/logging/TestGCId.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/* + * @test TestGCId + * @bug 8043607 + * @summary Ensure that the GCId is logged + * @key gc + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestGCId { + public static void main(String[] args) throws Exception { + testGCId("UseParallelGC", "PrintGC"); + testGCId("UseParallelGC", "PrintGCDetails"); + + testGCId("UseG1GC", "PrintGC"); + testGCId("UseG1GC", "PrintGCDetails"); + + testGCId("UseConcMarkSweepGC", "PrintGC"); + testGCId("UseConcMarkSweepGC", "PrintGCDetails"); + + testGCId("UseSerialGC", "PrintGC"); + testGCId("UseSerialGC", "PrintGCDetails"); + } + + private static void verifyContainsGCIDs(OutputAnalyzer output) { + output.shouldMatch("^#0: \\["); + output.shouldMatch("^#1: \\["); + output.shouldHaveExitValue(0); + } + + private static void verifyContainsNoGCIDs(OutputAnalyzer output) { + output.shouldNotMatch("^#[0-9]+: \\["); + output.shouldHaveExitValue(0); + } + + private static void testGCId(String gcFlag, String logFlag) throws Exception { + // GCID logging enabled + ProcessBuilder pb_enabled = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName()); + verifyContainsGCIDs(new OutputAnalyzer(pb_enabled.start())); + + // GCID logging disabled + ProcessBuilder pb_disabled = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName()); + verifyContainsNoGCIDs(new OutputAnalyzer(pb_disabled.start())); + + // GCID logging default + ProcessBuilder pb_default = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName()); + verifyContainsNoGCIDs(new OutputAnalyzer(pb_default.start())); + } + + static class GCTest { + private static byte[] garbage; + public static void main(String [] args) { + System.out.println("Creating garbage"); + // create 128MB of garbage. This should result in at least one GC + for (int i = 0; i < 1024; i++) { + garbage = new byte[128 * 1024]; + } + // do a system gc to get one more gc + System.gc(); + System.out.println("Done"); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/metaspace/TestCapacityUntilGCWrapAround.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @key gc + * @bug 8049831 + * @library /testlibrary /testlibrary/whitebox + * @build TestCapacityUntilGCWrapAround + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestCapacityUntilGCWrapAround + */ + +import sun.hotspot.WhiteBox; + +import com.oracle.java.testlibrary.Asserts; +import com.oracle.java.testlibrary.Platform; + +public class TestCapacityUntilGCWrapAround { + private static long MB = 1024 * 1024; + private static long GB = 1024 * MB; + private static long MAX_UINT = 4 * GB - 1; // On 32-bit platforms + + public static void main(String[] args) { + if (Platform.is32bit()) { + WhiteBox wb = WhiteBox.getWhiteBox(); + + long before = wb.metaspaceCapacityUntilGC(); + // Now force possible overflow of capacity_until_GC. + long after = wb.incMetaspaceCapacityUntilGC(MAX_UINT); + + Asserts.assertGTE(after, before, + "Increasing with MAX_UINT should not cause wrap around: " + after + " < " + before); + Asserts.assertLTE(after, MAX_UINT, + "Increasing with MAX_UINT should not cause value larger than MAX_UINT: " + after); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/AlignmentHelper.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.management.MemoryPoolMXBean; +import java.util.Optional; + +import sun.hotspot.WhiteBox; + +/** + * Helper class aimed to provide information about alignment of objects in + * a particular heap space, expected memory usage after objects' allocation, and so on. + */ +public class AlignmentHelper { + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + + private static final long OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM = 8L; + + /** + * Maximum allowed relative deviation of actual memory usage from expected + * memory usage.
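 + * Actual usage within 5% of the expected value is treated as a match.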
+ */ + private static final float MAX_RELATIVE_DEVIATION = 0.05f; // 5% + + public static final long OBJECT_ALIGNMENT_IN_BYTES = Optional.ofNullable( + AlignmentHelper.WHITE_BOX.getIntxVMFlag("ObjectAlignmentInBytes")) + .orElse(AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM); + + public static final long SURVIVOR_ALIGNMENT_IN_BYTES = Optional.ofNullable( + AlignmentHelper.WHITE_BOX.getIntxVMFlag("SurvivorAlignmentInBytes")) + .orElseThrow(() -> new AssertionError( + "Unable to get SurvivorAlignmentInBytes value")); + /** + * Min amount of memory that will be occupied by an object. + */ + public static final long MIN_OBJECT_SIZE + = AlignmentHelper.WHITE_BOX.getObjectSize(new Object()); + /** + * Min amount of memory that will be occupied by an empty byte array. + */ + public static final long MIN_ARRAY_SIZE + = AlignmentHelper.WHITE_BOX.getObjectSize(new byte[0]); + + /** + * Precision at which actual memory usage in a heap space represented by + * this sizing helper could be measured. + */ + private final long memoryUsageMeasurementPrecision; + /** + * Min amount of memory that will be occupied by an object allocated in a + * heap space represented by this sizing helper. + */ + private final long minObjectSizeInThisSpace; + /** + * Object's alignment in a heap space represented by this sizing helper. + */ + private final long objectAlignmentInThisRegion; + /** + * MemoryPoolMXBean associated with a heap space represented by this sizing + * helper. + */ + private final MemoryPoolMXBean poolMXBean; + + private static long alignUp(long value, long alignment) { + return ((value - 1) / alignment + 1) * alignment; + } + + protected AlignmentHelper(long memoryUsageMeasurementPrecision, + long objectAlignmentInThisRegion, long minObjectSizeInThisSpace, + MemoryPoolMXBean poolMXBean) { + this.memoryUsageMeasurementPrecision = memoryUsageMeasurementPrecision; + this.minObjectSizeInThisSpace = minObjectSizeInThisSpace; + this.objectAlignmentInThisRegion = objectAlignmentInThisRegion; + this.poolMXBean = poolMXBean; + } + + /** + * Returns how many objects have to be allocated to fill + * {@code memoryToFill} bytes in this heap space using objects of size + * {@code objectSize}. + */ + public int getObjectsCount(long memoryToFill, long objectSize) { + return (int) (memoryToFill / getObjectSizeInThisSpace(objectSize)); + } + + /** + * Returns amount of memory that {@code objectsCount} of objects with size + * {@code objectSize} will occupy in this space after allocation. + */ + public long getExpectedMemoryUsage(long objectSize, int objectsCount) { + long correctedObjectSize = getObjectSizeInThisSpace(objectSize); + return AlignmentHelper.alignUp(correctedObjectSize * objectsCount, + memoryUsageMeasurementPrecision); + } + + /** + * Returns current memory usage in this heap space. + */ + public long getActualMemoryUsage() { + return poolMXBean.getUsage().getUsed(); + } + + /** + * Returns maximum memory usage deviation from {@code expectedMemoryUsage} + * given the max allowed relative deviation equal to + * {@code MAX_RELATIVE_DEVIATION}. + * + * Note that value returned by this method is aligned according to + * memory measurement precision for this heap space.
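 + * For example, with the 5% maximum relative deviation and a measurement precision of 1 MB, an expected usage of 10 MB yields an unaligned deviation of 0.5 MB, which is then aligned up to 1 MB.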
+ */ + public long getAllowedMemoryUsageDeviation(long expectedMemoryUsage) { + long unalignedDeviation = (long) (expectedMemoryUsage * + AlignmentHelper.MAX_RELATIVE_DEVIATION); + return AlignmentHelper.alignUp(unalignedDeviation, + memoryUsageMeasurementPrecision); + } + + /** + * Returns amount of memory that will be occupied by an object with size + * {@code objectSize} in this heap space. + */ + public long getObjectSizeInThisSpace(long objectSize) { + objectSize = Math.max(objectSize, minObjectSizeInThisSpace); + + long alignedObjectSize = AlignmentHelper.alignUp(objectSize, + objectAlignmentInThisRegion); + long sizeDiff = alignedObjectSize - objectSize; + + // If there is not enough space to fit padding object, then object will + // be aligned to {@code 2 * objectAlignmentInThisRegion}. + if (sizeDiff >= AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES + && sizeDiff < AlignmentHelper.MIN_OBJECT_SIZE) { + alignedObjectSize += AlignmentHelper.MIN_OBJECT_SIZE; + alignedObjectSize = AlignmentHelper.alignUp(alignedObjectSize, + objectAlignmentInThisRegion); + } + + return alignedObjectSize; + } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + + builder.append(String.format("AlignmentHelper for memory pool '%s':%n", + poolMXBean.getName())); + builder.append(String.format("Memory usage measurement precision: %d%n", + memoryUsageMeasurementPrecision)); + builder.append(String.format("Min object size in this space: %d%n", + minObjectSizeInThisSpace)); + builder.append(String.format("Object alignment in this space: %d%n", + objectAlignmentInThisRegion)); + + return builder.toString(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/SurvivorAlignmentTestMain.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryPoolMXBean; +import java.util.Objects; +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.oracle.java.testlibrary.Asserts; +import com.sun.management.ThreadMXBean; +import sun.hotspot.WhiteBox; +import sun.misc.Unsafe; + +/** + * Main class for tests on {@code SurvivorAlignmentInBytes} option. + * + * Typical usage is to obtain instance using fromArgs method, allocate objects + * and verify that actual memory usage in tested heap space is close to + * expected. 
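+ *
+ * A typical flow, as used by the tests in this directory:
+ *
+ *    SurvivorAlignmentTestMain test = SurvivorAlignmentTestMain.fromArgs(args);
+ *    long expected = test.getExpectedMemoryUsage();
+ *    test.baselineMemoryAllocation();
+ *    test.allocate();
+ *    test.verifyMemoryUsage(expected);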
+ */
+public class SurvivorAlignmentTestMain {
+    enum HeapSpace {
+        EDEN,
+        SURVIVOR,
+        TENURED
+    }
+
+    public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    public static final long MAX_TENURING_THRESHOLD = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getIntxVMFlag(
+                    "MaxTenuringThreshold")).orElse(15L);
+
+    /**
+     * Regexp used to parse memory size params, like 2G, 34m or 15k.
+     */
+    private static final Pattern SIZE_REGEX
+            = Pattern.compile("(?<size>[0-9]+)(?<multiplier>[GMKgmk])?");
+
+    // Names of different heap spaces.
+    private static final String DEF_NEW_EDEN = "Eden Space";
+    private static final String DEF_NEW_SURVIVOR = "Survivor Space";
+    private static final String PAR_NEW_EDEN = "Par Eden Space";
+    private static final String PAR_NEW_SURVIVOR = "Par Survivor Space";
+    private static final String PS_EDEN = "PS Eden Space";
+    private static final String PS_SURVIVOR = "PS Survivor Space";
+    private static final String G1_EDEN = "G1 Eden Space";
+    private static final String G1_SURVIVOR = "G1 Survivor Space";
+    private static final String SERIAL_TENURED = "Tenured Gen";
+    private static final String CMS_TENURED = "CMS Old Gen";
+    private static final String PS_TENURED = "PS Old Gen";
+    private static final String G1_TENURED = "G1 Old Gen";
+
+    private static final long G1_HEAP_REGION_SIZE = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
+                    "G1HeapRegionSize")).orElse(-1L);
+
+    /**
+     * Min size of a free chunk in the CMS generation.
+     * An object allocated in the CMS generation will occupy at least this
+     * amount of bytes.
+     */
+    private static final long CMS_MIN_FREE_CHUNK_SIZE
+            = 3L * Unsafe.ADDRESS_SIZE;
+
+    private static final AlignmentHelper EDEN_SPACE_HELPER;
+    private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
+    private static final AlignmentHelper TENURED_SPACE_HELPER;
+    /**
+     * Amount of memory that should be filled during a test run.
+     */
+    private final long memoryToFill;
+    /**
+     * The size of the objects that will be allocated during a test run.
+     */
+    private final long objectSize;
+    /**
+     * Amount of memory actually occupied by an object in eden space.
+     */
+    private final long actualObjectSize;
+    /**
+     * Storage for allocated objects.
+     */
+    private final Object[] garbage;
+    /**
+     * Heap space whose memory usage is the subject of assertions during the
+     * test run.
+     */
+    private final HeapSpace testedSpace;
+
+    private long[] baselinedThreadMemoryUsage = null;
+    private long[] threadIds = null;
+
+    /**
+     * Initialize {@code EDEN_SPACE_HELPER}, {@code SURVIVOR_SPACE_HELPER} and
+     * {@code TENURED_SPACE_HELPER} to represent the heap spaces in use.
+     *
+     * Note that regardless of the GC used, an object's alignment in survivor
+     * space is expected to be equal to the {@code SurvivorAlignmentInBytes}
+     * value, and alignment in other spaces is expected to be equal to the
+     * {@code ObjectAlignmentInBytes} value.
+     *
+     * In the CMS generation we can't allocate less than the
+     * {@code MinFreeChunk} value; for other GCs we expect that an object of
+     * size {@code MIN_OBJECT_SIZE} can be allocated as is (of course, its
+     * size may be aligned according to the alignment value used in a
+     * particular space).
+     *
+     * For G1, MXBeans can report memory usage only with region-size precision
+     * (if an object is allocated in some G1 heap region, the whole region is
+     * claimed as used), so for G1's spaces the precision is equal to the
+     * {@code G1HeapRegionSize} value.
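+     *
+     * For example (illustrative): with -XX:G1HeapRegionSize=1m, allocating a
+     * single object in an otherwise empty G1 eden region makes the eden
+     * MemoryPoolMXBean report 1m of used memory.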
+ */ + static { + AlignmentHelper edenHelper = null; + AlignmentHelper survivorHelper = null; + AlignmentHelper tenuredHelper = null; + for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) { + switch (pool.getName()) { + case SurvivorAlignmentTestMain.DEF_NEW_EDEN: + case SurvivorAlignmentTestMain.PAR_NEW_EDEN: + case SurvivorAlignmentTestMain.PS_EDEN: + Asserts.assertNull(edenHelper, + "Only one bean for eden space is expected."); + edenHelper = new AlignmentHelper( + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.MIN_OBJECT_SIZE, pool); + break; + case SurvivorAlignmentTestMain.G1_EDEN: + Asserts.assertNull(edenHelper, + "Only one bean for eden space is expected."); + edenHelper = new AlignmentHelper( + SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE, + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.MIN_OBJECT_SIZE, pool); + break; + case SurvivorAlignmentTestMain.DEF_NEW_SURVIVOR: + case SurvivorAlignmentTestMain.PAR_NEW_SURVIVOR: + case SurvivorAlignmentTestMain.PS_SURVIVOR: + Asserts.assertNull(survivorHelper, + "Only one bean for survivor space is expected."); + survivorHelper = new AlignmentHelper( + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES, + AlignmentHelper.MIN_OBJECT_SIZE, pool); + break; + case SurvivorAlignmentTestMain.G1_SURVIVOR: + Asserts.assertNull(survivorHelper, + "Only one bean for survivor space is expected."); + survivorHelper = new AlignmentHelper( + SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE, + AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES, + AlignmentHelper.MIN_OBJECT_SIZE, pool); + break; + case SurvivorAlignmentTestMain.SERIAL_TENURED: + case SurvivorAlignmentTestMain.PS_TENURED: + case SurvivorAlignmentTestMain.G1_TENURED: + Asserts.assertNull(tenuredHelper, + "Only one bean for tenured space is expected."); + tenuredHelper = new AlignmentHelper( + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.MIN_OBJECT_SIZE, pool); + break; + case SurvivorAlignmentTestMain.CMS_TENURED: + Asserts.assertNull(tenuredHelper, + "Only one bean for tenured space is expected."); + tenuredHelper = new AlignmentHelper( + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES, + SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE, + pool); + break; + } + } + EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper, + "AlignmentHelper for eden space should be initialized."); + SURVIVOR_SPACE_HELPER = Objects.requireNonNull(survivorHelper, + "AlignmentHelper for survivor space should be initialized."); + TENURED_SPACE_HELPER = Objects.requireNonNull(tenuredHelper, + "AlignmentHelper for tenured space should be initialized."); + } + /** + * Returns an SurvivorAlignmentTestMain instance constructed using CLI + * options. + * + * Following options are expected: + *
+     * <ul>
+     *     <li>memoryToFill</li>
+     *     <li>objectSize</li>
+     * </ul>
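+     *
+     * For example, the argument triple {@code 10m 9 EDEN} requests filling
+     * 10 megabytes of eden space with objects of size 9 (rounded up to the
+     * minimal array size).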
+     *
+     * Both arguments may contain a multiplier suffix k, m or g.
+     */
+    public static SurvivorAlignmentTestMain fromArgs(String[] args) {
+        Asserts.assertEQ(args.length, 3, "Expected three arguments: "
+                + "memory size, object size and tested heap space name.");
+
+        long memoryToFill = parseSize(args[0]);
+        long objectSize = Math.max(parseSize(args[1]),
+                AlignmentHelper.MIN_ARRAY_SIZE);
+        HeapSpace testedSpace = HeapSpace.valueOf(args[2]);
+
+        return new SurvivorAlignmentTestMain(memoryToFill, objectSize,
+                testedSpace);
+    }
+
+    /**
+     * Returns a value parsed from a string with the format
+     * <integer><multiplier>.
+     */
+    private static long parseSize(String sizeString) {
+        Matcher matcher = SIZE_REGEX.matcher(sizeString);
+        Asserts.assertTrue(matcher.matches(),
+                "sizeString should have the following format \"[0-9]+([GMKgmk])?\"");
+        long size = Long.valueOf(matcher.group("size"));
+
+        if (matcher.group("multiplier") != null) {
+            long K = 1024L;
+            // fall through multipliers
+            switch (matcher.group("multiplier").toLowerCase()) {
+                case "g":
+                    size *= K;
+                case "m":
+                    size *= K;
+                case "k":
+                    size *= K;
+            }
+        }
+        return size;
+    }
+
+    private SurvivorAlignmentTestMain(long memoryToFill, long objectSize,
+            HeapSpace testedSpace) {
+        this.objectSize = objectSize;
+        this.memoryToFill = memoryToFill;
+        this.testedSpace = testedSpace;
+
+        AlignmentHelper helper = SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
+
+        this.actualObjectSize = helper.getObjectSizeInThisSpace(
+                this.objectSize);
+        int arrayLength = helper.getObjectsCount(memoryToFill, this.objectSize);
+        garbage = new Object[arrayLength];
+    }
+
+    /**
+     * Allocates byte arrays to fill {@code memoryToFill} bytes of memory.
+     */
+    public void allocate() {
+        int byteArrayLength = Math.max((int) (objectSize
+                - Unsafe.ARRAY_BYTE_BASE_OFFSET), 0);
+
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[byteArrayLength];
+        }
+    }
+
+    /**
+     * Releases the memory occupied by the {@code allocate} call.
+     */
+    public void release() {
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = null;
+        }
+    }
+
+    /**
+     * Returns the expected amount of memory occupied in a {@code heapSpace}
+     * by the objects referenced from the {@code garbage} array.
+     */
+    public long getExpectedMemoryUsage() {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+        return alignmentHelper.getExpectedMemoryUsage(objectSize,
+                garbage.length);
+    }
+
+    /**
+     * Verifies that memory usage in a {@code heapSpace} deviates from
+     * {@code expectedUsage} by no more than {@code MAX_RELATIVE_DEVIATION}.
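+     *
+     * For example (illustrative): with expectedUsage = 10 MB and an allowed
+     * deviation of 512 KB, any measured usage between 9.5 MB and 10.5 MB
+     * passes; a larger difference fails the assertion unless other threads
+     * are detected to have allocated memory in the meantime.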
+     */
+    public void verifyMemoryUsage(long expectedUsage) {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+
+        long actualMemoryUsage = alignmentHelper.getActualMemoryUsage();
+        boolean otherThreadsAllocatedMemory = areOtherThreadsAllocatedMemory();
+
+        long memoryUsageDiff = Math.abs(actualMemoryUsage - expectedUsage);
+        long maxAllowedUsageDiff
+                = alignmentHelper.getAllowedMemoryUsageDeviation(expectedUsage);
+
+        System.out.println("Verifying memory usage in space: " + testedSpace);
+        System.out.println("Allocated objects count: " + garbage.length);
+        System.out.println("Desired object size: " + objectSize);
+        System.out.println("Actual object size: " + actualObjectSize);
+        System.out.println("Expected object size in space: "
+                + alignmentHelper.getObjectSizeInThisSpace(objectSize));
+        System.out.println("Expected memory usage: " + expectedUsage);
+        System.out.println("Actual memory usage: " + actualMemoryUsage);
+        System.out.println("Memory usage diff: " + memoryUsageDiff);
+        System.out.println("Max allowed usage diff: " + maxAllowedUsageDiff);
+
+        if (memoryUsageDiff > maxAllowedUsageDiff
+                && otherThreadsAllocatedMemory) {
+            System.out.println("Memory usage diff is incorrect, but it seems "
+                    + "like someone else allocated objects");
+            return;
+        }
+
+        Asserts.assertLTE(memoryUsageDiff, maxAllowedUsageDiff,
+                "Actual memory usage should not deviate from expected for "
+                + "more than " + maxAllowedUsageDiff);
+    }
+
+    /**
+     * Baselines the amount of memory allocated by each thread.
+     */
+    public void baselineMemoryAllocation() {
+        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
+        threadIds = bean.getAllThreadIds();
+        baselinedThreadMemoryUsage = bean.getThreadAllocatedBytes(threadIds);
+    }
+
+    /**
+     * Checks if threads other than the current thread were allocating objects
+     * after the {@code baselineMemoryAllocation} call.
+     *
+     * If {@code baselineMemoryAllocation} was not called, then this method
+     * will return {@code false}.
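+     *
+     * Note that only threads that existed at baseline time are compared: the
+     * thread ids are captured by {@code baselineMemoryAllocation}, so threads
+     * started afterwards are not covered by this check.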
+ */ + public boolean areOtherThreadsAllocatedMemory() { + if (baselinedThreadMemoryUsage == null) { + return false; + } + + ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + long currentMemoryAllocation[] + = bean.getThreadAllocatedBytes(threadIds); + boolean otherThreadsAllocatedMemory = false; + + System.out.println("Verifying amount of memory allocated by threads:"); + for (int i = 0; i < threadIds.length; i++) { + System.out.format("Thread %d%nbaseline allocation: %d" + + "%ncurrent allocation:%d%n", threadIds[i], + baselinedThreadMemoryUsage[i], currentMemoryAllocation[i]); + System.out.println(bean.getThreadInfo(threadIds[i])); + + long bytesAllocated = Math.abs(currentMemoryAllocation[i] + - baselinedThreadMemoryUsage[i]); + if (bytesAllocated > 0 + && threadIds[i] != Thread.currentThread().getId()) { + otherThreadsAllocatedMemory = true; + } + } + + return otherThreadsAllocatedMemory; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + + builder.append(String.format("SurvivorAlignmentTestMain info:%n")); + builder.append(String.format("Desired object size: %d%n", objectSize)); + builder.append(String.format("Memory to fill: %d%n", memoryToFill)); + builder.append(String.format("Objects to be allocated: %d%n", + garbage.length)); + + builder.append(String.format("Alignment helpers to be used: %n")); + for (HeapSpace heapSpace: HeapSpace.values()) { + builder.append(String.format("For space %s:%n%s%n", heapSpace, + getAlignmentHelper(heapSpace))); + } + + return builder.toString(); + } + + /** + * Returns {@code AlignmentHelper} for a space {@code heapSpace}. + */ + public static AlignmentHelper getAlignmentHelper(HeapSpace heapSpace) { + switch (heapSpace) { + case EDEN: + return SurvivorAlignmentTestMain.EDEN_SPACE_HELPER; + case SURVIVOR: + return SurvivorAlignmentTestMain.SURVIVOR_SPACE_HELPER; + case TENURED: + return SurvivorAlignmentTestMain.TENURED_SPACE_HELPER; + default: + throw new Error("Unexpected heap space: " + heapSpace); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/TestAllocationInEden.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8031323 + * @summary Verify that object's alignment in eden space is not affected by + * SurvivorAlignmentInBytes option. 
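+ *          Each @run below passes three trailing arguments (memory to fill,
+ *          object size, tested space), e.g. TestAllocationInEden 10m 9 EDEN.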
+ * @library /testlibrary /testlibrary/whitebox + * @build TestAllocationInEden SurvivorAlignmentTestMain AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 9 EDEN + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 47 EDEN + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 9 EDEN + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 87 EDEN + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 9 EDEN + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB + * -XX:OldSize=128m -XX:MaxHeapSize=192m + * -XX:-ExplicitGCInvokesConcurrent + * TestAllocationInEden 10m 147 EDEN + */ +public class TestAllocationInEden { + public static void main(String args[]) { + SurvivorAlignmentTestMain test + = SurvivorAlignmentTestMain.fromArgs(args); + System.out.println(test); + + long expectedMemoryUsage = test.getExpectedMemoryUsage(); + test.baselineMemoryAllocation(); + System.gc(); + + test.allocate(); + + test.verifyMemoryUsage(expectedMemoryUsage); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8031323 + * @summary Verify that objects promoted from eden space to tenured space during + * full GC are not aligned to SurvivorAlignmentInBytes value. + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromEdenToTenured SurvivorAlignmentTestMain + * AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromEdenToTenured 10m 9 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromEdenToTenured 10m 47 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32m -XX:MaxHeapSize=96m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromEdenToTenured 10m 9 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32m -XX:MaxHeapSize=128m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromEdenToTenured 10m 87 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32M -XX:MaxHeapSize=96m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromEdenToTenured 10m 9 TENURED + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m + * -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromEdenToTenured 10m 147 TENURED + */ +public class TestPromotionFromEdenToTenured { + public static void main(String args[]) { + SurvivorAlignmentTestMain test + = SurvivorAlignmentTestMain.fromArgs(args); + System.out.println(test); + + long expectedMemoryUsage = test.getExpectedMemoryUsage(); + test.baselineMemoryAllocation(); + System.gc(); + // increase expected usage by current old gen usage + expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper( + SurvivorAlignmentTestMain.HeapSpace.TENURED) + .getActualMemoryUsage(); + + test.allocate(); + System.gc(); + + test.verifyMemoryUsage(expectedMemoryUsage); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8031323 + * @summary Verify that objects promoted from survivor space to tenured space + * during full GC are not aligned to SurvivorAlignmentInBytes value. + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromSurvivorToTenuredAfterFullGC + * SurvivorAlignmentTestMain AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32m -XX:MaxHeapSize=160m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32m -XX:MaxHeapSize=160m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromSurvivorToTenuredAfterFullGC 20m 47 + * TENURED + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m + * -XX:OldSize=32m -XX:MaxHeapSize=232m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32m -XX:MaxHeapSize=160m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromSurvivorToTenuredAfterFullGC 20m 87 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m + * -XX:OldSize=32M -XX:MaxHeapSize=288m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32m -XX:MaxHeapSize=160m + * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromSurvivorToTenuredAfterFullGC 20m 147 + * TENURED + */ +public class TestPromotionFromSurvivorToTenuredAfterFullGC { + public static void main(String args[]) { + SurvivorAlignmentTestMain test + = SurvivorAlignmentTestMain.fromArgs(args); + System.out.println(test); + + long expectedMemoryUsage = test.getExpectedMemoryUsage(); + test.baselineMemoryAllocation(); + System.gc(); + // increase expected usage by current old gen usage + expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper( + SurvivorAlignmentTestMain.HeapSpace.TENURED) + .getActualMemoryUsage(); + + test.allocate(); + SurvivorAlignmentTestMain.WHITE_BOX.youngGC(); + System.gc(); + + test.verifyMemoryUsage(expectedMemoryUsage); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8031323 + * @summary Verify that objects promoted from survivor space to tenured space + * when their age exceeded tenuring threshold are not aligned to + * SurvivorAlignmentInBytes value. + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromSurvivorToTenuredAfterMinorGC + * SurvivorAlignmentTestMain AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 47 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m + * -XX:OldSize=32M -XX:MaxHeapSize=232m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=64 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 87 + * TENURED + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m + * -XX:OldSize=32M -XX:MaxHeapSize=288m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9 + * TENURED + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=128 + * TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 147 + * TENURED + */ +public class TestPromotionFromSurvivorToTenuredAfterMinorGC { + public static void main(String args[]) throws Exception { + SurvivorAlignmentTestMain test + = SurvivorAlignmentTestMain.fromArgs(args); + System.out.println(test); + + long expectedMemoryUsage = test.getExpectedMemoryUsage(); + test.baselineMemoryAllocation(); + SurvivorAlignmentTestMain.WHITE_BOX.fullGC(); + // increase expected usage by current old gen usage + expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper( + SurvivorAlignmentTestMain.HeapSpace.TENURED) + .getActualMemoryUsage(); + + test.allocate(); + for (int i = 0; i <= SurvivorAlignmentTestMain.MAX_TENURING_THRESHOLD; + i++) { + SurvivorAlignmentTestMain.WHITE_BOX.youngGC(); + } + + test.verifyMemoryUsage(expectedMemoryUsage); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8031323 + * @summary Verify that objects promoted from eden space to survivor space after + * minor GC are aligned to SurvivorAlignmentInBytes. + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionToSurvivor + * SurvivorAlignmentTestMain AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m + * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent + * TestPromotionToSurvivor 10m 9 SURVIVOR + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m + * -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m + * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent + * TestPromotionToSurvivor 20m 47 SURVIVOR + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 8m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 87 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=384m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 10m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 147 SURVIVOR
+ */
+public class TestPromotionToSurvivor {
+    public static void main(String args[]) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
+
+        test.allocate();
+        SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+
+        test.verifyMemoryUsage(expectedUsage);
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/gc/whitebox/TestWBGC.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestWBGC
+ * @bug 8055098
+ * @summary Verify that the WB methods isObjectInOldGen and youngGC work correctly.
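+ *          (MaxTenuringThreshold is set to 1 in the child VM, so two young
+ *          GCs are enough to promote an object to the old generation.)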
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestWBGC
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestWBGC
+ */
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class TestWBGC {
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                true,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:MaxTenuringThreshold=1",
+                "-XX:+PrintGC",
+                GCYoungTest.class.getName());
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        System.out.println(output.getStdout());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("WhiteBox Initiated Young GC");
+        output.shouldNotContain("Full");
+        // Make sure that we don't provoke a full GC in addition to the young GC.
+    }
+
+    public static class GCYoungTest {
+        static WhiteBox wb = WhiteBox.getWhiteBox();
+        public static Object obj;
+
+        public static void main(String args[]) {
+            obj = new Object();
+            Asserts.assertFalse(wb.isObjectInOldGen(obj));
+            wb.youngGC();
+            wb.youngGC();
+            // Two young GCs are needed to promote the object into the old gen.
+            Asserts.assertTrue(wb.isObjectInOldGen(obj));
+        }
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/CheckEndorsedAndExtDirs/EndorsedExtDirs.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8064667
+ * @summary Sanity test for -XX:+CheckEndorsedAndExtDirs
+ * @library /testlibrary
+ * @build com.oracle.java.testlibrary.*
+ * @run main/othervm EndorsedExtDirs
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+
+public class EndorsedExtDirs {
+    static final String cpath = System.getProperty("test.classes", ".");
+    public static void main(String arg[]) throws Exception {
+        fatalError("-XX:+CheckEndorsedAndExtDirs", "-Djava.endorsed.dirs=foo");
+        fatalError("-XX:+CheckEndorsedAndExtDirs", "-Djava.ext.dirs=bar");
+        testNonEmptySystemExtDirs();
+    }
+
+    static void testNonEmptySystemExtDirs() throws Exception {
+        String home = System.getProperty("java.home");
+        Path ext = Paths.get(home, "lib", "ext");
+        String extDirs = System.getProperty("java.ext.dirs");
+        String[] dirs = extDirs.split(File.pathSeparator);
+        long count = 0;
+        for (String d : dirs) {
+            Path path = Paths.get(d);
+            if (Files.notExists(path) || path.equals(ext)) continue;
+            count += Files.find(path, 1, (Path p, BasicFileAttributes attr)
+                                    -> p.getFileName().toString().endsWith(".jar"))
+                          .count();
+        }
+        if (count > 0) {
+            fatalError("-XX:+CheckEndorsedAndExtDirs");
+        }
+    }
+
+    static ProcessBuilder newProcessBuilder(String... args) {
+        List<String> commands = new ArrayList<>();
+        String java = System.getProperty("java.home") + "/bin/java";
+        commands.add(java);
+        for (String s : args) {
+            commands.add(s);
+        }
+        commands.add("-cp");
+        commands.add(cpath);
+        commands.add("EndorsedExtDirs");
+
+        System.out.println("Process " + commands);
+        return new ProcessBuilder(commands);
+    }
+
+    static void fatalError(String... args) throws Exception {
+        fatalError(newProcessBuilder(args));
+    }
+
+    static void fatalError(ProcessBuilder pb) throws Exception {
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Could not create the Java Virtual Machine");
+        output.shouldHaveExitValue(1);
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/LoadClass/ShowClassLoader.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +/* + * @test + * @key regression + * @bug 8058927 + * @summary Make sure array class has the right class loader + * @run main ShowClassLoader + */ + +public class ShowClassLoader { + + public static void main(String[] args) { + Object[] oa = new Object[0]; + ShowClassLoader[] sa = new ShowClassLoader[0]; + + System.out.println("Classloader for Object[] is " + oa.getClass().getClassLoader()); + System.out.println("Classloader for SCL[] is " + sa.getClass().getClassLoader() ); + + if (sa.getClass().getClassLoader() == null) { + throw new RuntimeException("Wrong class loader"); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/AutoshutdownNMT.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @key nmt + * @summary Test for deprecated message if -XX:-AutoShutdownNMT is specified + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class AutoshutdownNMT { + + public static void main(String args[]) throws Exception { + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:NativeMemoryTracking=detail", + "-XX:-AutoShutdownNMT", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("ignoring option AutoShutdownNMT"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/ChangeTrackingLevel.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8059100 + * @summary Test that you can decrease NMT tracking level but not increase it. + * @key nmt + * @library /testlibrary /testlibrary/whitebox + * @build ChangeTrackingLevel + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel + */ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +public class ChangeTrackingLevel { + + public static WhiteBox wb = WhiteBox.getWhiteBox(); + public static void main(String args[]) throws Exception { + boolean testChangeLevel = wb.NMTChangeTrackingLevel(); + if (testChangeLevel) { + System.out.println("NMT level change test passed."); + } else { + // it also fails if the VM asserts. + throw new RuntimeException("NMT level change test failed"); + } + } +}; --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/JcmdBaselineDetail.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @key nmt jcmd + * @summary Verify that jcmd correctly reports that baseline succeeds with NMT enabled with detailed tracking. + * @library /testlibrary + * @run main/othervm -XX:NativeMemoryTracking=detail JcmdBaselineDetail + */ + +import com.oracle.java.testlibrary.*; + +public class JcmdBaselineDetail { + + public static void main(String args[]) throws Exception { + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + OutputAnalyzer output; + + ProcessBuilder pb = new ProcessBuilder(); + + // Run 'jcmd VM.native_memory baseline=true' + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Baseline succeeded"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/JcmdDetailDiff.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary run NMT baseline, allocate memory and verify output from detail.diff + * @key nmt jcmd + * @library /testlibrary /testlibrary/whitebox + * @ignore + * @build JcmdDetailDiff + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail JcmdDetailDiff + */ + +import com.oracle.java.testlibrary.*; + +import sun.hotspot.WhiteBox; + +public class JcmdDetailDiff { + + public static WhiteBox wb = WhiteBox.getWhiteBox(); + + public static void main(String args[]) throws Exception { + ProcessBuilder pb = new ProcessBuilder(); + OutputAnalyzer output; + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + + long commitSize = 128 * 1024; + long reserveSize = 256 * 1024; + long addr; + + // Run 'jcmd VM.native_memory baseline=true' + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"}); + pb.start().waitFor(); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Baseline succeeded"); + + addr = wb.NMTReserveMemory(reserveSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)"); + + wb.NMTCommitMemory(addr, commitSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)"); + + wb.NMTUncommitMemory(addr, commitSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)"); + + wb.NMTReleaseMemory(addr, reserveSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Test (reserved="); + } +} --- ./hotspot/test/runtime/NMT/JcmdScale.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/JcmdScale.java Wed Feb 04 12:14:39 2015 -0800 @@ -41,15 +41,15 @@ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"}); output = new OutputAnalyzer(pb.start()); - output.shouldContain("KB, committed="); + output.shouldContain("KB, committed="); 
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"}); output = new OutputAnalyzer(pb.start()); - output.shouldContain("MB, committed="); + output.shouldContain("MB, committed="); pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"}); output = new OutputAnalyzer(pb.start()); - output.shouldContain("GB, committed="); + output.shouldContain("GB, committed="); pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"}); output = new OutputAnalyzer(pb.start()); @@ -57,7 +57,7 @@ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"}); output = new OutputAnalyzer(pb.start()); - output.shouldContain("GB, committed="); + output.shouldContain("GB, committed="); pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"}); output = new OutputAnalyzer(pb.start()); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/JcmdScaleDetail.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @key nmt jcmd + * @summary Test the NMT scale parameter with detail tracking level + * @library /testlibrary + * @run main/othervm -XX:NativeMemoryTracking=detail JcmdScaleDetail + */ + +import com.oracle.java.testlibrary.*; + +public class JcmdScaleDetail { + + public static void main(String args[]) throws Exception { + ProcessBuilder pb = new ProcessBuilder(); + OutputAnalyzer output; + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("KB, committed="); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("MB, committed="); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("GB, committed="); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Incorrect scale value: apa"); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("GB, committed="); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Incorrect scale value: apa"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/JcmdSummaryDiff.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary run NMT baseline, allocate memory and verify output from summary.diff + * @key nmt jcmd + * @library /testlibrary /testlibrary/whitebox + * @build JcmdSummaryDiff + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary JcmdSummaryDiff + */ + +import com.oracle.java.testlibrary.*; + +import sun.hotspot.WhiteBox; + +public class JcmdSummaryDiff { + + public static WhiteBox wb = WhiteBox.getWhiteBox(); + + public static void main(String args[]) throws Exception { + ProcessBuilder pb = new ProcessBuilder(); + OutputAnalyzer output; + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + + long commitSize = 128 * 1024; + long reserveSize = 256 * 1024; + long addr; + + // Run 'jcmd VM.native_memory baseline=true' + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"}); + pb.start().waitFor(); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Baseline succeeded"); + + addr = wb.NMTReserveMemory(reserveSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)"); + + wb.NMTCommitMemory(addr, commitSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)"); + + wb.NMTUncommitMemory(addr, commitSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)"); + + wb.NMTReleaseMemory(addr, reserveSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"}); + + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Test (reserved="); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/MallocRoundingReportTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test consistency of NMT by creating allocations of the Test type with various sizes and verifying visibility with jcmd + * @key nmt jcmd + * @library /testlibrary /testlibrary/whitebox + * @build MallocRoundingReportTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocRoundingReportTest + * + */ + +import com.oracle.java.testlibrary.*; + +import sun.hotspot.WhiteBox; + +public class MallocRoundingReportTest { + private static long K = 1024; + + public static void main(String args[]) throws Exception { + OutputAnalyzer output; + WhiteBox wb = WhiteBox.getWhiteBox(); + + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + ProcessBuilder pb = new ProcessBuilder(); + + long[] additionalBytes = {0, 1, 512, 650}; + long[] kByteSize = {1024, 2048}; + long mallocd_total = 0; + for ( int i = 0; i < kByteSize.length; i++) + { + for (int j = 0; j < (additionalBytes.length); j++) { + long curKB = kByteSize[i] + additionalBytes[j]; + // round up/down to the nearest KB to match NMT reporting + long numKB = (curKB % kByteSize[i] >= 512) ? ((curKB / K) + 1) : curKB / K; + // Use WB API to alloc and free with the mtTest type + mallocd_total = wb.NMTMalloc(curKB); + // Run 'jcmd VM.native_memory summary', check for expected output + // NMT does not track memory allocations less than 1KB, and rounds to the nearest KB + String expectedOut = ("Test (reserved=" + numKB + "KB, committed=" + numKB + "KB)"); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" }); + output = new OutputAnalyzer(pb.start()); + output.shouldContain(expectedOut); + + wb.NMTFree(mallocd_total); + // Run 'jcmd VM.native_memory summary', check for expected output + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" }); + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Test (reserved="); + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test corner case that overflows malloc site hashtable bucket + * @requires sun.arch.data.model == "32" + * @key nmt jcmd stress + * @library /testlibrary /testlibrary/whitebox + * @build MallocSiteHashOverflow + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocSiteHashOverflow {
+
+    public static void main(String args[]) throws Exception {
+
+        // Size of entries based on malloc tracking header defined in mallocTracker.hpp
+        // For 32-bit systems, create 257 malloc sites with the same hash bucket to overflow a hash bucket
+        long entries = 257;
+
+        OutputAnalyzer output;
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        int MAX_HASH_SIZE = wb.NMTGetHashSize();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        // Verify that current tracking level is "detail"
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Native Memory Tracking Statistics");
+
+        // Attempt to cause NMT to downgrade tracking level by allocating small amounts
+        // of memory with random pseudo call stack
+        int pc = 1;
+        for (int i = 0; i < entries; i++) {
+            long addr = wb.NMTMallocWithPseudoStack(1, pc);
+            if (addr == 0) {
+                throw new RuntimeException("NMTMallocWithPseudoStack: out of memory");
+            }
+            // We free memory here since it doesn't affect pseudo malloc alloc site hash table entries
+            wb.NMTFree(addr);
+            pc += MAX_HASH_SIZE;
+            if (i == entries - 1) {
+                // The final entry overflows the bucket; verify that tracking has been downgraded
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain("Tracking level has been downgraded due to lack of resources");
+            }
+        }
+    }
+}
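Illustrative aside (not part of the changeset): the test above overflows one bucket of the malloc-site hashtable by stepping its pseudo PC by the table size. A standalone sketch of the arithmetic, assuming the site table buckets entries by pc modulo the table size (the 511 below is a made-up stand-in for the real wb.NMTGetHashSize() value):

    public class BucketCollisionSketch {
        public static void main(String[] args) {
            int tableSize = 511;      // made-up stand-in for wb.NMTGetHashSize()
            int pc = 1;
            for (int i = 0; i < 257; i++) {
                // (1 + i * tableSize) % tableSize == 1, so every pseudo site
                // lands in the same bucket of the site hashtable
                if (pc % tableSize != 1) {
                    throw new AssertionError("unexpected bucket for pc " + pc);
                }
                pc += tableSize;      // same stepping as the test's pc += MAX_HASH_SIZE
            }
            System.out.println("all 257 pseudo call sites map to one bucket");
        }
    }

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/NMT/MallocStressTest.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Stress test for malloc tracking
+ * @key nmt jcmd stress
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocStressTest
+ * @ignore - This test is disabled since it will stress NMT and timeout during normal testing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:.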
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocStressTest {
+    private static int K = 1024;
+
+    // The stress test runs in three phases:
+    // 1. alloc: A lot of malloc with fewer free, which simulates a burst memory allocation
+    //    that is usually seen during startup or class loading.
+    // 2. pause: Pause the test to check accuracy of native memory tracking
+    // 3. release: Release all malloc'd memory and check native memory tracking result.
+    public enum TestPhase {
+        alloc,
+        pause,
+        release
+    };
+
+    static TestPhase phase = TestPhase.alloc;
+
+    // malloc'd memory
+    static ArrayList<MallocMemory> mallocd_memory = new ArrayList<MallocMemory>();
+    static long mallocd_total = 0;
+    static WhiteBox whiteBox;
+    static AtomicInteger pause_count = new AtomicInteger();
+
+    static boolean is_64_bit_system;
+
+    private static boolean is_64_bit_system() { return is_64_bit_system; }
+
+    public static void main(String args[]) throws Exception {
+        is_64_bit_system = (Platform.is64bit());
+
+        OutputAnalyzer output;
+        whiteBox = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        AllocThread[] alloc_threads = new AllocThread[256];
+        ReleaseThread[] release_threads = new ReleaseThread[64];
+
+        int index;
+        // Create many allocation threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            alloc_threads[index] = new AllocThread();
+        }
+
+        // Fewer release threads
+        for (index = 0; index < release_threads.length; index ++) {
+            release_threads[index] = new ReleaseThread();
+        }
+
+        if (is_64_bit_system()) {
+            sleep_wait(2*60*1000);
+        } else {
+            sleep_wait(60*1000);
+        }
+        // pause the stress test
+        phase = TestPhase.pause;
+        while (pause_count.intValue() < alloc_threads.length + release_threads.length) {
+            sleep_wait(10);
+        }
+
+        long mallocd_total_in_KB = (mallocd_total + K / 2) / K;
+
+        // Now check if the result from NMT matches the total memory allocated.
+        String expected_test_summary = "Test (reserved=" + mallocd_total_in_KB + "KB, committed=" + mallocd_total_in_KB + "KB)";
+        // Run 'jcmd <pid> VM.native_memory summary'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain(expected_test_summary);
+
+        // Release all allocated memory
+        phase = TestPhase.release;
+        synchronized(mallocd_memory) {
+            mallocd_memory.notifyAll();
+        }
+
+        // Join all threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            try {
+                alloc_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        for (index = 0; index < release_threads.length; index ++) {
+            try {
+                release_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        // All test memory allocated should be released
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+
+        // Verify that tracking level has not been downgraded
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Tracking level has been downgraded due to lack of resources");
+    }
+
+    private static void sleep_wait(int n) {
+        try {
+            Thread.sleep(n);
+        } catch (InterruptedException e) {
+        }
+    }
+
+
+    static class MallocMemory {
+        private long addr;
+        private int size;
+
+        MallocMemory(long addr, int size) {
+            this.addr = addr;
+            this.size = size;
+        }
+
+        long addr() { return this.addr; }
+        int size()  { return this.size; }
+    }
+
+    static class AllocThread extends Thread {
+        AllocThread() {
+            this.setName("MallocThread");
+            this.start();
+        }
+
+        // AllocThread only runs "Alloc" phase
+        public void run() {
+            Random random = new Random();
+            while (MallocStressTest.phase == TestPhase.alloc) {
+                int r = Math.abs(random.nextInt());
+                // Only malloc small amount to avoid OOM
+                int size = r % 32;
+                if (is_64_bit_system()) {
+                    r = r % 32 * K;
+                } else {
+                    r = r % 64;
+                }
+                if (size == 0) size = 1;
+                long addr = MallocStressTest.whiteBox.NMTMallocWithPseudoStack(size, r);
+                if (addr != 0) {
+                    MallocMemory mem = new MallocMemory(addr, size);
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.add(mem);
+                        MallocStressTest.mallocd_total += size;
+                    }
+                } else {
+                    System.out.println("Out of malloc memory");
+                    break;
+                }
+            }
+            MallocStressTest.pause_count.incrementAndGet();
+        }
+    }
+
+    static class ReleaseThread extends Thread {
+        private Random random = new Random();
+        ReleaseThread() {
+            this.setName("ReleaseThread");
+            this.start();
+        }
+
+        public void run() {
+            while(true) {
+                switch(MallocStressTest.phase) {
+                case alloc:
+                    slow_release();
+                    break;
+                case pause:
+                    enter_pause();
+                    break;
+                case release:
+                    quick_release();
+                    return;
+                }
+            }
+        }
+
+        private void enter_pause() {
+            MallocStressTest.pause_count.incrementAndGet();
+            while (MallocStressTest.phase != MallocStressTest.TestPhase.release) {
+                try {
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.wait(10);
+                    }
+                } catch (InterruptedException e) {
+                }
+            }
+        }
+
+        private void quick_release() {
+            List<MallocMemory> free_list;
+            while (true) {
+                synchronized(MallocStressTest.mallocd_memory) {
+                    if (MallocStressTest.mallocd_memory.isEmpty()) return;
+                    int size = Math.min(MallocStressTest.mallocd_memory.size(), 5000);
+                    List<MallocMemory> subList = MallocStressTest.mallocd_memory.subList(0, size);
+                    free_list = new ArrayList<MallocMemory>(subList);
+                    subList.clear();
+                }
+                for (int
index = 0; index < free_list.size(); index ++) { + MallocMemory mem = free_list.get(index); + MallocStressTest.whiteBox.NMTFree(mem.addr()); + } + } + } + + private void slow_release() { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + } + synchronized(MallocStressTest.mallocd_memory) { + if (MallocStressTest.mallocd_memory.isEmpty()) return; + int n = Math.abs(random.nextInt()) % MallocStressTest.mallocd_memory.size(); + MallocMemory mem = mallocd_memory.remove(n); + MallocStressTest.whiteBox.NMTFree(mem.addr()); + MallocStressTest.mallocd_total -= mem.size(); + } + } + } +} --- ./hotspot/test/runtime/NMT/MallocTestType.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/MallocTestType.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,11 +51,6 @@ long memAlloc1 = wb.NMTMalloc(512 * 1024); wb.NMTFree(memAlloc2); - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); - } - // Run 'jcmd VM.native_memory summary' pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"}); output = new OutputAnalyzer(pb.start()); @@ -64,10 +59,6 @@ // Free the memory allocated by NMTAllocTest wb.NMTFree(memAlloc1); - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); - } output = new OutputAnalyzer(pb.start()); output.shouldNotContain("Test (reserved="); } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/MallocTrackingVerify.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8054836 + * @summary Test to verify correctness of malloc tracking + * @key nmt jcmd + * @library /testlibrary /testlibrary/whitebox + * @build MallocTrackingVerify + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTrackingVerify
+ *
+ */
+
+import java.util.ArrayList;
+import java.util.Random;
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class MallocTrackingVerify {
+    private static int MAX_ALLOC = 4 * 1024;
+
+    static ArrayList<MallocMemory> mallocd_memory = new ArrayList<MallocMemory>();
+    static long mallocd_total = 0;
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        Random random = new Random();
+        // Allocate small amounts of memory with random pseudo call stack
+        while (mallocd_total < MAX_ALLOC) {
+            int size = random.nextInt(31) + 1;
+            long addr = wb.NMTMallocWithPseudoStack(size, random.nextInt());
+            if (addr != 0) {
+                MallocMemory mem = new MallocMemory(addr, size);
+                mallocd_memory.add(mem);
+                mallocd_total += size;
+            } else {
+                System.out.println("Out of malloc memory");
+                break;
+            }
+        }
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=4KB, committed=4KB)");
+
+        // Free
+        for (MallocMemory mem : mallocd_memory) {
+            wb.NMTFree(mem.addr());
+        }
+
+        // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
+                "VM.native_memory", "summary" });
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+
+    static class MallocMemory {
+        private long addr;
+        private int size;
+
+        MallocMemory(long addr, int size) {
+            this.addr = addr;
+            this.size = size;
+        }
+
+        long addr() {
+            return this.addr;
+        }
+
+        int size() {
+            return this.size;
+        }
+    }
+}
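Illustrative aside (not part of the changeset): the 4KB expectation above holds even though the allocation loop can overshoot MAX_ALLOC, because the final chunk adds at most 31 bytes, so the running total stays within [4096, 4126] and still rounds to 4KB. A standalone sketch, using the same nearest-KB rounding as the (mallocd_total + K / 2) / K expression in MallocStressTest above:

    import java.util.Random;

    public class RoundingSketch {
        public static void main(String[] args) {
            Random random = new Random(42);      // fixed seed, illustration only
            int total = 0;
            while (total < 4 * 1024) {
                total += random.nextInt(31) + 1; // same 1..31 byte chunks as the test
            }
            // total is in [4096, 4126], so the nearest-KB figure is always 4
            int reportedKB = (total + 512) / 1024;
            System.out.println(total + " bytes -> " + reportedKB + "KB");
        }
    }

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/NMT/NMTWithCDS.java	Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.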
+ */ + +/* + * @test + * @bug 8055061 + * @key nmt + * @library /testlibrary + * @run main NMTWithCDS + */ +import com.oracle.java.testlibrary.*; + +public class NMTWithCDS { + + public static void main(String[] args) throws Exception { + ProcessBuilder pb; + pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("Loading classes to share"); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("sharing"); + output.shouldHaveExitValue(0); + + } catch (RuntimeException e) { + // Report 'passed' if CDS was turned off. + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } +} --- ./hotspot/test/runtime/NMT/PrintNMTStatistics.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/PrintNMTStatistics.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,46 +24,40 @@ /* * @test * @key nmt regression - * @bug 8005936 - * @summary Make sure PrintNMTStatistics works on normal JVM exit - * @library /testlibrary /testlibrary/whitebox - * @build PrintNMTStatistics - * @run main ClassFileInstaller sun.hotspot.WhiteBox - * @run main PrintNMTStatistics + * @bug 8005936 8058606 + * @summary Verify PrintNMTStatistics on normal JVM exit for detail and summary tracking level + * @library /testlibrary */ import com.oracle.java.testlibrary.*; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import sun.hotspot.WhiteBox; - public class PrintNMTStatistics { - public static void main(String args[]) throws Exception { - - // We start a new java process running with an argument and use WB API to ensure - // we have data for NMT on VM exit - if (args.length > 0) { - // Use WB API to ensure that all data has been merged before we continue - if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); - } - return; - } + public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", - "-Xbootclasspath/a:.", - "-XX:+WhiteBoxAPI", - "-XX:NativeMemoryTracking=summary", - "-XX:+PrintNMTStatistics", - "PrintNMTStatistics", - "test"); + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+PrintNMTStatistics", + "-XX:NativeMemoryTracking=detail", + "-version"); - OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldContain("Java Heap (reserved="); - output.shouldNotContain("error"); - output.shouldHaveExitValue(0); - } + OutputAnalyzer output_detail = new OutputAnalyzer(pb.start()); + output_detail.shouldContain("Virtual memory map:"); + output_detail.shouldContain("Details:"); + output_detail.shouldNotContain("error"); + output_detail.shouldHaveExitValue(0); + + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+PrintNMTStatistics", + "-XX:NativeMemoryTracking=summary", + "-version"); + + OutputAnalyzer output_summary = new 
OutputAnalyzer(pb1.start()); + output_summary.shouldContain("Java Heap (reserved="); + output_summary.shouldNotContain("Virtual memory map:"); + output_summary.shouldNotContain("Details:"); + output_summary.shouldNotContain("error"); + output_summary.shouldHaveExitValue(0); + } } --- ./hotspot/test/runtime/NMT/ReleaseCommittedMemory.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/ReleaseCommittedMemory.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,6 @@ addr = wb.NMTReserveMemory(reserveSize); wb.NMTCommitMemory(addr, 128*1024); wb.NMTReleaseMemory(addr, reserveSize); - wb.NMTWaitForDataMerge(); } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/ReleaseNoCommit.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Release uncommitted memory and make sure NMT handles it correctly + * @key nmt regression + * @library /testlibrary /testlibrary/whitebox + * @build ReleaseNoCommit + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary ReleaseNoCommit + */ + +import com.oracle.java.testlibrary.JDKToolFinder; +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; + +import sun.hotspot.WhiteBox; + +public class ReleaseNoCommit { + + public static void main(String args[]) throws Exception { + WhiteBox wb = WhiteBox.getWhiteBox(); + long reserveSize = 256 * 1024; + long addr; + + ProcessBuilder pb = new ProcessBuilder(); + OutputAnalyzer output; + // Grab my own PID + String pid = Integer.toString(ProcessTools.getProcessId()); + + addr = wb.NMTReserveMemory(reserveSize); + // Check for reserved + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldContain(" Test (reserved=256KB, committed=0KB)"); + + wb.NMTReleaseMemory(addr, reserveSize); + + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"}); + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Test (reserved="); + } +} --- ./hotspot/test/runtime/NMT/ShutdownTwice.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/ShutdownTwice.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,12 +45,12 @@ output = new OutputAnalyzer(pb.start()); // Verify that jcmd reports that NMT is shutting down - output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown"); + output.shouldContain("Native memory tracking has been turned off"); // Run shutdown again output = new OutputAnalyzer(pb.start()); // Verify that jcmd reports that NMT has been shutdown already - output.shouldContain("Native memory tracking has been shutdown by user"); + output.shouldContain("Native memory tracking has been shutdown"); } } --- ./hotspot/test/runtime/NMT/SummaryAfterShutdown.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/SummaryAfterShutdown.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,13 @@
     output = new OutputAnalyzer(pb.start());
     // Verify that jcmd reports that NMT is shutting down
-    output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown");
+    output.shouldContain("Native memory tracking has been turned off");
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
     // Verify that jcmd reports that NMT has been shutdown
-    output.shouldContain("Native memory tracking has been shutdown by user");
+    output.shouldContain("Native memory tracking has been shutdown");
   }
 }
--- ./hotspot/test/runtime/NMT/SummarySanityCheck.java	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/test/runtime/NMT/SummarySanityCheck.java	Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,11 +44,6 @@
     // Grab my own PID
     String pid = Integer.toString(ProcessTools.getProcessId());
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     ProcessBuilder pb = new ProcessBuilder();
     // Run 'jcmd <pid> VM.native_memory summary scale=KB'
@@ -69,13 +64,13 @@
     // Match '- <mtType> (reserved=<reserved>KB, committed=<committed>KB)
     Pattern mtTypePattern = Pattern.compile("-\\s+(?<typeName>[\\w\\s]+)\\(reserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB\\)");
     // Match 'Total: reserved=<reserved>KB, committed=<committed>KB'
-    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\s\\sreserved=(?<reserved>\\d+)KB,\\s\\scommitted=(?<committed>\\d+)KB");
+    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\sreserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB");
     for (int i = 0; i < lines.length; i++) {
       if (lines[i].startsWith("Total")) {
         Matcher totalMemoryMatcher = totalMemoryPattern.matcher(lines[i]);
-        if (totalMemoryMatcher.matches() && totalMemoryMatcher.groupCount() == 2) {
+        if (totalMemoryMatcher.matches()) {
           totalCommitted = Integer.parseInt(totalMemoryMatcher.group("committed"));
           totalReserved = Integer.parseInt(totalMemoryMatcher.group("reserved"));
         } else {
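Illustrative aside (not part of the changeset): the patterns above rely on java.util.regex named groups, where the name inside (?<name>...) must match the string passed to Matcher.group(String). A minimal standalone sketch:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class NamedGroupSketch {
        public static void main(String[] args) {
            // Same shape as the test's totalMemoryPattern: one space after ':' and ','
            Pattern p = Pattern.compile("Total\\:\\sreserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB");
            Matcher m = p.matcher("Total: reserved=1024KB, committed=512KB");
            if (m.matches()) {
                System.out.println(Integer.parseInt(m.group("reserved")));   // 1024
                System.out.println(Integer.parseInt(m.group("committed")));  // 512
            }
        }
    }

--- ./hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Wed Feb 04 12:14:39 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.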
* * This code is free software; you can redistribute it and/or modify it @@ -58,11 +58,6 @@ allocThread.start(); allocThread.join(); - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); - } - // Run 'jcmd VM.native_memory summary' pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"}); output = new OutputAnalyzer(pb.start()); @@ -80,11 +75,6 @@ freeThread.start(); freeThread.join(); - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); - } - output = new OutputAnalyzer(pb.start()); output.shouldNotContain("Test (reserved="); } --- ./hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,8 +60,6 @@ reserveThread.start(); reserveThread.join(); - mergeData(); - pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"}); output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=512KB, committed=0KB)"); @@ -77,8 +75,6 @@ commitThread.start(); commitThread.join(); - mergeData(); - output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=512KB, committed=128KB)"); if (has_nmt_detail) { @@ -93,8 +89,6 @@ uncommitThread.start(); uncommitThread.join(); - mergeData(); - output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=512KB, committed=0KB)"); output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed"); @@ -107,17 +101,9 @@ releaseThread.start(); releaseThread.join(); - mergeData(); - output = new OutputAnalyzer(pb.start()); output.shouldNotContain("Test (reserved="); output.shouldNotContain("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved"); } - public static void mergeData() throws Exception { - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); } - } -} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly + * @key nmt jcmd + * @library /testlibrary /testlibrary/whitebox + * @build VirtualAllocCommitUncommitRecommit + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit + * + */ + +import com.oracle.java.testlibrary.*; + +import sun.hotspot.WhiteBox; + +public class VirtualAllocCommitUncommitRecommit { + + public static WhiteBox wb = WhiteBox.getWhiteBox(); + + public static void main(String args[]) throws Exception { + OutputAnalyzer output; + long commitSize = 4 * 1024; // 4KB + long reserveSize = 1024 * 1024; // 1024KB + long addr; + + String pid = Integer.toString(ProcessTools.getProcessId()); + ProcessBuilder pb = new ProcessBuilder(); + + boolean has_nmt_detail = wb.NMTIsDetailSupported(); + if (has_nmt_detail) { + System.out.println("NMT detail support detected."); + } else { + System.out.println("NMT detail support not detected."); + } + + // reserve + addr = wb.NMTReserveMemory(reserveSize); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, + "VM.native_memory", "detail" }); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=0KB)"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + long addrA = addr; + long addrB = addr + commitSize; + long addrC = addr + (2 * commitSize); + long addrD = addr + (3 * commitSize); + long addrE = addr + (4 * commitSize); + long addrF = addr + (5 * commitSize); + + // commit ABCD + wb.NMTCommitMemory(addrA, commitSize); + wb.NMTCommitMemory(addrB, commitSize); + wb.NMTCommitMemory(addrC, commitSize); + wb.NMTCommitMemory(addrD, commitSize); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=16KB)"); + + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + // uncommit BC + wb.NMTUncommitMemory(addrB, commitSize); + wb.NMTUncommitMemory(addrC, commitSize); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=8KB)"); + + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + // commit EF + wb.NMTCommitMemory(addrE, commitSize); + wb.NMTCommitMemory(addrF, commitSize); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=16KB)"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + // uncommit A + wb.NMTUncommitMemory(addrA, commitSize); + + output = new OutputAnalyzer(pb.start()); + 
output.shouldContain("Test (reserved=1024KB, committed=12KB)"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + // commit ABC + wb.NMTCommitMemory(addrA, commitSize); + wb.NMTCommitMemory(addrB, commitSize); + wb.NMTCommitMemory(addrC, commitSize); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=24KB)"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + // uncommit ABCDEF + wb.NMTUncommitMemory(addrA, commitSize); + wb.NMTUncommitMemory(addrB, commitSize); + wb.NMTUncommitMemory(addrC, commitSize); + wb.NMTUncommitMemory(addrD, commitSize); + wb.NMTUncommitMemory(addrE, commitSize); + wb.NMTUncommitMemory(addrF, commitSize); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Test (reserved=1024KB, committed=0KB)"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + + "\\] reserved 1024KB for Test"); + } + + // release + wb.NMTReleaseMemory(addr, reserveSize); + output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("Test (reserved="); + output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + + Long.toHexString(addr + reserveSize) + "\\] reserved 1024KB for Test"); + } +} --- ./hotspot/test/runtime/NMT/VirtualAllocTestType.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/NMT/VirtualAllocTestType.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,6 @@ } addr = wb.NMTReserveMemory(reserveSize); - mergeData(); pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"}); output = new OutputAnalyzer(pb.start()); @@ -65,7 +64,6 @@ wb.NMTCommitMemory(addr, commitSize); - mergeData(); output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=256KB, committed=128KB)"); @@ -75,24 +73,15 @@ wb.NMTUncommitMemory(addr, commitSize); - mergeData(); output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=256KB, committed=0KB)"); output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed"); wb.NMTReleaseMemory(addr, reserveSize); - mergeData(); output = new OutputAnalyzer(pb.start()); output.shouldNotContain("Test (reserved="); output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved"); } - - public static void mergeData() throws Exception { - // Use WB API to ensure that all data has been merged before we continue - if (!wb.NMTWaitForDataMerge()) { - throw new Exception("Call to WB API NMTWaitForDataMerge() failed"); } - } -} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/RedefineTests/RedefineAnnotations.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @summary Test that type annotations are retained after a retransform
+ * @run main RedefineAnnotations buildagent
+ * @run main/othervm -javaagent:redefineagent.jar RedefineAnnotations
+ */
+
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import java.io.FileNotFoundException;
+import java.io.PrintWriter;
+import java.lang.NoSuchFieldException;
+import java.lang.NoSuchMethodException;
+import java.lang.RuntimeException;
+import java.lang.annotation.Annotation;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.lang.reflect.AnnotatedArrayType;
+import java.lang.reflect.AnnotatedParameterizedType;
+import java.lang.reflect.AnnotatedType;
+import java.lang.reflect.AnnotatedWildcardType;
+import java.lang.reflect.Executable;
+import java.lang.reflect.TypeVariable;
+import java.security.ProtectionDomain;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import jdk.internal.org.objectweb.asm.ClassReader;
+import jdk.internal.org.objectweb.asm.ClassVisitor;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.FieldVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.ASM5;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE_USE)
+@interface TestAnn {
+    String site();
+}
+
+public class RedefineAnnotations {
+    static Instrumentation inst;
+    public static void premain(String agentArgs, Instrumentation inst) {
+        RedefineAnnotations.inst = inst;
+    }
+
+    static class Transformer implements ClassFileTransformer {
+
+        public byte[] asm(ClassLoader loader, String className,
+                Class<?> classBeingRedefined,
+                ProtectionDomain protectionDomain, byte[] classfileBuffer)
+            throws IllegalClassFormatException {
+
+            ClassWriter cw = new ClassWriter(0);
+            ClassVisitor cv = new ReAddDummyFieldsClassVisitor(ASM5, cw) { };
+            ClassReader cr = new ClassReader(classfileBuffer);
+            cr.accept(cv, 0);
+            return cw.toByteArray();
+        }
+
+        public class ReAddDummyFieldsClassVisitor extends ClassVisitor {
+
+            LinkedList<F> fields = new LinkedList<>();
+
+            public ReAddDummyFieldsClassVisitor(int api, ClassVisitor cv) {
+                super(api, cv);
+            }
+
+            @Override public FieldVisitor visitField(int access, String name,
+                    String
desc, String signature, Object value) { + if (name.startsWith("dummy")) { + // Remove dummy field + fields.addLast(new F(access, name, desc, signature, value)); + return null; + } + return cv.visitField(access, name, desc, signature, value); + } + + @Override public void visitEnd() { + F f; + while ((f = fields.pollFirst()) != null) { + // Re-add dummy fields + cv.visitField(f.access, f.name, f.desc, f.signature, f.value); + } + } + + private class F { + private int access; + private String name; + private String desc; + private String signature; + private Object value; + F(int access, String name, String desc, String signature, Object value) { + this.access = access; + this.name = name; + this.desc = desc; + this.signature = signature; + this.value = value; + } + } + } + + @Override public byte[] transform(ClassLoader loader, String className, + Class classBeingRedefined, + ProtectionDomain protectionDomain, byte[] classfileBuffer) + throws IllegalClassFormatException { + + if (className.contains("TypeAnnotatedTestClass")) { + try { + // Here we remove and re-add the dummy fields. This shuffles the constant pool + return asm(loader, className, classBeingRedefined, protectionDomain, classfileBuffer); + } catch (Throwable e) { + // The retransform native code that called this method does not propagate + // exceptions. Instead of getting an uninformative generic error, catch + // problems here and print it, then exit. + e.printStackTrace(); + System.exit(1); + } + } + return null; + } + } + + private static void buildAgent() { + try { + ClassFileInstaller.main("RedefineAnnotations"); + } catch (Exception e) { + throw new RuntimeException("Could not write agent classfile", e); + } + + try { + PrintWriter pw = new PrintWriter("MANIFEST.MF"); + pw.println("Premain-Class: RedefineAnnotations"); + pw.println("Agent-Class: RedefineAnnotations"); + pw.println("Can-Retransform-Classes: true"); + pw.close(); + } catch (FileNotFoundException e) { + throw new RuntimeException("Could not write manifest file for the agent", e); + } + + sun.tools.jar.Main jarTool = new sun.tools.jar.Main(System.out, System.err, "jar"); + if (!jarTool.run(new String[] { "-cmf", "MANIFEST.MF", "redefineagent.jar", "RedefineAnnotations.class" })) { + throw new RuntimeException("Could not write the agent jar file"); + } + } + + public static void main(String argv[]) throws NoSuchFieldException, NoSuchMethodException { + if (argv.length == 1 && argv[0].equals("buildagent")) { + buildAgent(); + return; + } + + if (inst == null) { + throw new RuntimeException("Instrumentation object was null"); + } + + RedefineAnnotations test = new RedefineAnnotations(); + test.testTransformAndVerify(); + } + + // Class type annotations + private Annotation classTypeParameterTA; + private Annotation extendsTA; + private Annotation implementsTA; + + // Field type annotations + private Annotation fieldTA; + private Annotation innerTA; + private Annotation[] arrayTA = new Annotation[4]; + private Annotation[] mapTA = new Annotation[5]; + + // Method type annotations + private Annotation returnTA, methodTypeParameterTA, formalParameterTA, throwsTA; + + private void testTransformAndVerify() + throws NoSuchFieldException, NoSuchMethodException { + + Class c = TypeAnnotatedTestClass.class; + Class myClass = c; + + /* + * Verify that the expected annotations are where they should be before transform. 
+ */ + verifyClassTypeAnnotations(c); + verifyFieldTypeAnnotations(c); + verifyMethodTypeAnnotations(c); + + try { + inst.addTransformer(new Transformer(), true); + inst.retransformClasses(myClass); + } catch (UnmodifiableClassException e) { + throw new RuntimeException(e); + } + + /* + * Verify that the expected annotations are where they should be after transform. + * Also verify that before and after are equal. + */ + verifyClassTypeAnnotations(c); + verifyFieldTypeAnnotations(c); + verifyMethodTypeAnnotations(c); + } + + private void verifyClassTypeAnnotations(Class c) { + Annotation anno; + + anno = c.getTypeParameters()[0].getAnnotations()[0]; + verifyTestAnn(classTypeParameterTA, anno, "classTypeParameter"); + classTypeParameterTA = anno; + + anno = c.getAnnotatedSuperclass().getAnnotations()[0]; + verifyTestAnn(extendsTA, anno, "extends"); + extendsTA = anno; + + anno = c.getAnnotatedInterfaces()[0].getAnnotations()[0]; + verifyTestAnn(implementsTA, anno, "implements"); + implementsTA = anno; + } + + private void verifyFieldTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + + verifyBasicFieldTypeAnnotations(c); + verifyInnerFieldTypeAnnotations(c); + verifyArrayFieldTypeAnnotations(c); + verifyMapFieldTypeAnnotations(c); + } + + private void verifyBasicFieldTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + + Annotation anno = c.getDeclaredField("typeAnnotatedBoolean").getAnnotatedType().getAnnotations()[0]; + verifyTestAnn(fieldTA, anno, "field"); + fieldTA = anno; + } + + private void verifyInnerFieldTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + + AnnotatedType at = c.getDeclaredField("typeAnnotatedInner").getAnnotatedType(); + Annotation anno = at.getAnnotations()[0]; + verifyTestAnn(innerTA, anno, "inner"); + innerTA = anno; + } + + private void verifyArrayFieldTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + + Annotation anno; + AnnotatedType at; + + at = c.getDeclaredField("typeAnnotatedArray").getAnnotatedType(); + anno = at.getAnnotations()[0]; + verifyTestAnn(arrayTA[0], anno, "array1"); + arrayTA[0] = anno; + + for (int i = 1; i <= 3; i++) { + at = ((AnnotatedArrayType) at).getAnnotatedGenericComponentType(); + anno = at.getAnnotations()[0]; + verifyTestAnn(arrayTA[i], anno, "array" + (i + 1)); + arrayTA[i] = anno; + } + } + + private void verifyMapFieldTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + + Annotation anno; + AnnotatedType atBase; + AnnotatedType atParameter; + atBase = c.getDeclaredField("typeAnnotatedMap").getAnnotatedType(); + + anno = atBase.getAnnotations()[0]; + verifyTestAnn(mapTA[0], anno, "map1"); + mapTA[0] = anno; + + atParameter = + ((AnnotatedParameterizedType) atBase). + getAnnotatedActualTypeArguments()[0]; + anno = ((AnnotatedWildcardType) atParameter).getAnnotations()[0]; + verifyTestAnn(mapTA[1], anno, "map2"); + mapTA[1] = anno; + + anno = + ((AnnotatedWildcardType) atParameter). + getAnnotatedUpperBounds()[0].getAnnotations()[0]; + verifyTestAnn(mapTA[2], anno, "map3"); + mapTA[2] = anno; + + atParameter = + ((AnnotatedParameterizedType) atBase). + getAnnotatedActualTypeArguments()[1]; + anno = ((AnnotatedParameterizedType) atParameter).getAnnotations()[0]; + verifyTestAnn(mapTA[3], anno, "map4"); + mapTA[3] = anno; + + anno = + ((AnnotatedParameterizedType) atParameter). 
+ getAnnotatedActualTypeArguments()[0].getAnnotations()[0]; + verifyTestAnn(mapTA[4], anno, "map5"); + mapTA[4] = anno; + } + + private void verifyMethodTypeAnnotations(Class c) + throws NoSuchFieldException, NoSuchMethodException { + Annotation anno; + Executable typeAnnotatedMethod = + c.getDeclaredMethod("typeAnnotatedMethod", TypeAnnotatedTestClass.class); + + anno = typeAnnotatedMethod.getAnnotatedReturnType().getAnnotations()[0]; + verifyTestAnn(returnTA, anno, "return"); + returnTA = anno; + + anno = typeAnnotatedMethod.getTypeParameters()[0].getAnnotations()[0]; + verifyTestAnn(methodTypeParameterTA, anno, "methodTypeParameter"); + methodTypeParameterTA = anno; + + anno = typeAnnotatedMethod.getAnnotatedParameterTypes()[0].getAnnotations()[0]; + verifyTestAnn(formalParameterTA, anno, "formalParameter"); + formalParameterTA = anno; + + anno = typeAnnotatedMethod.getAnnotatedExceptionTypes()[0].getAnnotations()[0]; + verifyTestAnn(throwsTA, anno, "throws"); + throwsTA = anno; + } + + private static void verifyTestAnn(Annotation verifyAgainst, Annotation anno, String expectedSite) { + verifyTestAnnSite(anno, expectedSite); + + // When called before transform verifyAgainst will be null, when called + // after transform it will be the annotation from before the transform + if (verifyAgainst != null) { + assertTrue(anno.equals(verifyAgainst), + "Annotations do not match before and after." + + " Before: \"" + verifyAgainst + "\", After: \"" + anno + "\""); + } + } + + private static void verifyTestAnnSite(Annotation testAnn, String expectedSite) { + String expectedAnn = "@TestAnn(site=" + expectedSite + ")"; + assertTrue(testAnn.toString().equals(expectedAnn), + "Expected \"" + expectedAnn + "\", got \"" + testAnn + "\""); + } + + public static class TypeAnnotatedTestClass <@TestAnn(site="classTypeParameter") S,T> + extends @TestAnn(site="extends") Thread + implements @TestAnn(site="implements") Runnable { + + public @TestAnn(site="field") boolean typeAnnotatedBoolean; + + public + RedefineAnnotations. + @TestAnn(site="inner") TypeAnnotatedTestClass + typeAnnotatedInner; + + public + @TestAnn(site="array4") boolean + @TestAnn(site="array1") [] + @TestAnn(site="array2") [] + @TestAnn(site="array3") [] + typeAnnotatedArray; + + public @TestAnn(site="map1") Map + <@TestAnn(site="map2") ? extends @TestAnn(site="map3") String, + @TestAnn(site="map4") List<@TestAnn(site="map5") Object>> typeAnnotatedMap; + + public int dummy1; + public int dummy2; + public int dummy3; + + @TestAnn(site="return") <@TestAnn(site="methodTypeParameter") U,V> Class + typeAnnotatedMethod(@TestAnn(site="formalParameter") TypeAnnotatedTestClass arg) + throws @TestAnn(site="throws") ClassNotFoundException { + + @TestAnn(site="local_variable_type") int foo = 0; + throw new ClassNotFoundException(); + } + + public void run() {} + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test ArchiveDoesNotExist + * @summary Test how VM handles "file does not exist" situation while + * attempting to use CDS archive. JVM should exit gracefully + * when sharing mode is ON, and continue w/o sharing if sharing + * mode is AUTO. + * @library /testlibrary + * @run main ArchiveDoesNotExist + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class ArchiveDoesNotExist { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + File cdsFile = new File(fileName); + if (cdsFile.exists()) + throw new RuntimeException("Test error: cds file already exists"); + + // Sharing: on + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:on", + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Specified shared archive not found"); + output.shouldHaveExitValue(1); + + // Sharing: auto + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:auto", + "-version"); + + output = new OutputAnalyzer(pb.start()); + output.shouldMatch("(java|openjdk) version"); + output.shouldNotContain("sharing"); + output.shouldHaveExitValue(0); + } +} --- ./hotspot/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java Wed Feb 04 12:14:39 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ * is different from object alignment for creating a CDS file * should fail when loading. * @library /testlibrary + * @bug 8025642 */ import com.oracle.java.testlibrary.*; @@ -82,7 +83,11 @@ createAlignment, loadAlignment); - output.shouldContain(expectedErrorMsg); + try { + output.shouldContain(expectedErrorMsg); + } catch (RuntimeException e) { + output.shouldContain("Unable to use shared archive"); + } output.shouldHaveExitValue(1); } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/SharedArchiveFile/DefaultUseWithClient.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test DefaultUseWithClient + * @summary Test default behavior of sharing with -client + * @library /testlibrary + * @run main DefaultUseWithClient + * @bug 8032224 + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class DefaultUseWithClient { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + // On 32-bit windows CDS should be on by default in "-client" config + // Skip this test on any other platform + boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit()); + if (!is32BitWindows) { + System.out.println("Test only applicable on 32-bit Windows. Skipping"); + return; + } + + // create the archive + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-client", + "-XX:+PrintSharedSpaces", + "-version"); + + output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("sharing"); + } catch (RuntimeException e) { + // if sharing failed due to ASLR or similar reasons, + // check whether sharing was attempted at all (UseSharedSpaces) + output.shouldContain("UseSharedSpaces:"); + } + output.shouldHaveExitValue(0); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* @test LimitSharedSizes + * @summary Test handling of limits on shared space size + * @library /testlibrary + * @run main LimitSharedSizes + */ + +import com.oracle.java.testlibrary.*; + +public class LimitSharedSizes { + private static class SharedSizeTestData { + public String optionName; + public String optionValue; + public String expectedErrorMsg; + + public SharedSizeTestData(String name, String value, String msg) { + optionName = name; + optionValue = value; + expectedErrorMsg = msg; + } + } + + private static final SharedSizeTestData[] testTable = { + // values in this part of the test table should cause failure + // (shared space sizes are deliberately too small) + new SharedSizeTestData("-XX:SharedReadOnlySize", "4M", "read only"), + new SharedSizeTestData("-XX:SharedReadWriteSize","4M", "read write"), + + // Known issue, JDK-8038422 (assert() on Windows) + // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"), + + // Too small of a misc code size should not cause a vm crash. + // It should result in the following error message: + // The shared miscellaneous code space is not large enough + // to preload requested classes. Use -XX:SharedMiscCodeSize= + // to increase the initial size of shared miscellaneous code space. + new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"), + + // these values are larger than default ones, but should + // be acceptable and not cause failure + new SharedSizeTestData("-XX:SharedReadOnlySize", "20M", null), + new SharedSizeTestData("-XX:SharedReadWriteSize", "20M", null), + new SharedSizeTestData("-XX:SharedMiscDataSize", "20M", null), + new SharedSizeTestData("-XX:SharedMiscCodeSize", "20M", null) + }; + + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + for (SharedSizeTestData td : testTable) { + String option = td.optionName + "=" + td.optionValue; + System.out.println("testing option <" + option + ">"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + option, + "-Xshare:dump"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + if (td.expectedErrorMsg != null) { + output.shouldContain("The shared " + td.expectedErrorMsg + + " space is not large enough"); + + output.shouldHaveExitValue(2); + } else { + output.shouldNotContain("space is not large enough"); + output.shouldHaveExitValue(0); + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066670 + * @summary Testing -XX:+PrintSharedArchiveAndExit option + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class PrintSharedArchiveAndExit { + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("Loading classes to share"); + output.shouldHaveExitValue(0); + + // (1) With a valid archive + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is valid"); + output.shouldNotContain("java version"); // Should not print JVM version + output.shouldHaveExitValue(0); // Should report success in error code. + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is valid"); + output.shouldNotContain("Usage:"); // Should not print JVM help message + output.shouldHaveExitValue(0); // Should report success in error code. + + // (2) With an invalid archive (boot class path has been prepended) + pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/p:foo.jar", + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is invalid"); + output.shouldNotContain("java version"); // Should not print JVM version + output.shouldHaveExitValue(1); // Should report failure in error code. + + pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/p:foo.jar", + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is invalid"); + output.shouldNotContain("Usage:"); // Should not print JVM help message + output.shouldHaveExitValue(1); // Should report failure in error code. + } catch (RuntimeException e) { + e.printStackTrace(); + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/runtime/SharedArchiveFile/SharedBaseAddress.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SharedBaseAddress
+ * @summary Test a variety of values for SharedBaseAddress, making sure
+ *          the VM handles normal values as well as edge values without a crash.
+ * @library /testlibrary
+ * @run main SharedBaseAddress
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class SharedBaseAddress {
+
+    // shared base address test table
+    private static final String[] testTable = {
+        "1g", "8g", "64g", "512g", "4t",
+        "32t", "128t", "0",
+        "1", "64k", "64M"
+    };
+
+    public static void main(String[] args) throws Exception {
+        // Known issue on Solaris-Sparc
+        // @ignore JDK-8044600
+        if (Platform.isSolaris() && Platform.isSparc())
+            return;
+
+        for (String testEntry : testTable) {
+            System.out.println("sharedBaseAddress = " + testEntry);
+
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:SharedArchiveFile=test.jsa",
+                "-XX:SharedBaseAddress=" + testEntry,
+                "-Xshare:dump");
+
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+            output.shouldContain("Loading classes to share");
+
+            try {
+                pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:SharedArchiveFile=test.jsa",
+                    "-Xshare:on",
+                    "-version");
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain("sharing");
+                output.shouldHaveExitValue(0);
+            } catch (RuntimeException e) {
+                output.shouldContain("Unable to use shared archive");
+                output.shouldHaveExitValue(1);
+            }
+        }
+    }
+}
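Note: the CDS tests above and below share the same dump-then-run recipe. Reduced to its core it looks like the following sketch; the class name CdsDumpAndRunSketch and the archive path are illustrative, not part of the patch, and it assumes the jtreg testlibrary shown elsewhere in this patch is on the classpath:

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class CdsDumpAndRunSketch {
    public static void main(String[] args) throws Exception {
        // Step 1: dump a shared archive.
        ProcessBuilder dump = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./sketch.jsa",
            "-Xshare:dump");
        new OutputAnalyzer(dump.start()).shouldContain("Loading classes to share");

        // Step 2: run against the archive; -Xshare:on makes a mapping failure
        // fatal, while -Xshare:auto would silently fall back to no sharing.
        ProcessBuilder run = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./sketch.jsa",
            "-Xshare:on",
            "-version");
        new OutputAnalyzer(run.start()).shouldHaveExitValue(0);
    }
}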
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SpaceUtilizationCheck
+ * @summary Check if the space utilization for shared spaces is adequate
+ * @library /testlibrary
+ * @run main SpaceUtilizationCheck
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.ArrayList;
+import java.lang.Integer;
+
+public class SpaceUtilizationCheck {
+    // Minimum allowed utilization value (percent)
+    // The goal is to have this number be 50% for the RO and RW regions
+    // Once that feature is implemented, increase MIN_UTILIZATION to 50
+    private static final int MIN_UTILIZATION = 30;
+
+    // Only RO and RW regions are considered for this check, since they
+    // currently account for the bulk of the shared space
+    private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./test.jsa",
+            "-Xshare:dump");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        String stdout = output.getStdout();
+        ArrayList<String> utilization = findUtilization(stdout);
+
+        if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS)
+            throw new RuntimeException("The output format of sharing summary has changed");
+
+        for (String str : utilization) {
+            int value = Integer.parseInt(str);
+            if (value < MIN_UTILIZATION) {
+                System.out.println(stdout);
+                throw new RuntimeException("Utilization for one of the regions " +
+                    "is below a threshold of " + MIN_UTILIZATION + "%");
+            }
+        }
+    }
+
+    public static ArrayList<String> findUtilization(String input) {
+        ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
+        return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
+    }
+
+    private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
+        ArrayList<String> result = new ArrayList<String>();
+        for (String str : input) {
+            Matcher matcher = Pattern.compile(pattern).matcher(str);
+            if (matcher.find()) {
+                result.add(matcher.group());
+            }
+        }
+        return result;
+    }
+
+    private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
+        ArrayList<String> result = new ArrayList<String>();
+        for (String str : inputLines) {
+            if (str.contains("ro space:") || str.contains("rw space:")) {
+                result.add(str);
+            }
+        }
+        return result;
+    }
+}
--- ./hotspot/test/runtime/lambda-features/InvokespecialInterface.java Mon Dec 08 12:28:35 2014 -0800
+++ ./hotspot/test/runtime/lambda-features/InvokespecialInterface.java Wed Feb 04 12:14:39 2015 -0800
@@ -33,11 +33,12 @@
 import java.util.function.*;
 import java.util.*;
 
+public class InvokespecialInterface {
 interface I {
     default void imethod() { System.out.println("I::imethod"); }
 }
 
-class C implements I {
+static class C implements I {
     public void foo() { I.super.imethod(); }  // invokespecial InterfaceMethod
     public void bar() { I i = this; i.imethod(); } // invokeinterface same
     public void doSomeInvokedynamic() {
@@ -48,7 +49,6 @@
     }
 }
 
-public class InvokespecialInterface {
     public static void main(java.lang.String[] unused) {
         // need to create C and call I::foo()
         C c = new C();
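Note: the distinction the InvokespecialInterface test exercises can be seen in miniature below. This standalone snippet is illustrative only (SuperCallSketch is not part of the patch): the I.super-style call compiles to invokespecial of an InterfaceMethodref, while the call through an interface-typed reference compiles to invokeinterface and dispatches virtually.

public class SuperCallSketch {
    interface Greet {
        default String hello() { return "Greet::hello"; }
    }

    static class Impl implements Greet {
        @Override public String hello() { return "Impl::hello"; }
        String viaSuper()     { return Greet.super.hello(); }       // invokespecial
        String viaInterface() { Greet g = this; return g.hello(); } // invokeinterface
    }

    public static void main(String[] args) {
        Impl i = new Impl();
        System.out.println(i.viaSuper());      // prints Greet::hello
        System.out.println(i.viaInterface());  // prints Impl::hello
    }
}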
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/lambda-features/TestInterfaceInit.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization: only for interfaces declaring default methods
+ * @run main TestInterfaceInit
+ */
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceInit {
+
+    static List<Class<?>> cInitOrder = new ArrayList<>();
+
+    // Declares a default method and initializes
+    interface I {
+        boolean v = TestInterfaceInit.out(I.class);
+        default void x() {}
+    }
+
+    // Declares a default method and initializes
+    interface J extends I {
+        boolean v = TestInterfaceInit.out(J.class);
+        default void x() {}
+    }
+    // No default method, does not initialize
+    interface JN extends J {
+        boolean v = TestInterfaceInit.out(JN.class);
+    }
+
+    // Declares a default method and initializes
+    interface K extends I {
+        boolean v = TestInterfaceInit.out(K.class);
+        default void x() {}
+    }
+
+    // No default method, does not initialize
+    interface KN extends K {
+        boolean v = TestInterfaceInit.out(KN.class);
+    }
+
+    interface L extends JN, KN {
+        boolean v = TestInterfaceInit.out(L.class);
+        default void x() {}
+    }
+
+    public static void main(String[] args) {
+        // Trigger initialization
+        boolean v = L.v;
+
+        List<Class<?>> expectedCInitOrder = Arrays.asList(I.class, J.class, K.class, L.class);
+        if (!cInitOrder.equals(expectedCInitOrder)) {
+            throw new RuntimeException(String.format("Class initialization array %s not equal to expected array %s", cInitOrder, expectedCInitOrder));
+        }
+    }
+
+    static boolean out(Class c) {
+        System.out.println("#: initializing " + c.getName());
+        cInitOrder.add(c);
+        return true;
+    }
+
+}
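Note: the boolean v fields above can observe initialization because interface fields are implicitly public static final, and an initializer that is not a compile-time constant must run in the interface's static initializer (<clinit>). A standalone illustration of that distinction (ClinitTriggerSketch is illustrative, not part of the patch):

public class ClinitTriggerSketch {
    interface A {
        int CONSTANT = 42;              // compile-time constant: inlined, needs no <clinit>
        long STAMP = System.nanoTime(); // not a constant: assigned in A's <clinit>
        default void f() {}
    }

    public static void main(String[] args) {
        // Reading a compile-time constant does not initialize A...
        System.out.println(A.CONSTANT);
        // ...whereas reading STAMP is an active use that triggers A's <clinit>.
        System.out.println(A.STAMP);
    }
}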
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/runtime/lambda-features/TestInterfaceOrder.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization order
+ * @run main TestInterfaceOrder
+ */
+
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceOrder {
+    static List<Class<?>> cInitOrder = new ArrayList<>();
+
+    public static void main(java.lang.String[] args) {
+        // Trigger initialization
+        C c = new C();
+
+        List<Class<?>> expectedCInitOrder = Arrays.asList(I.class, J.class, A.class, K.class, B.class, L.class, C.class);
+        if (!cInitOrder.equals(expectedCInitOrder)) {
+            throw new RuntimeException(String.format("Class initialization order %s not equal to expected order %s", cInitOrder, expectedCInitOrder));
+        }
+    }
+
+    interface I {
+        boolean v = TestInterfaceOrder.out(I.class);
+        default void i() {}
+    }
+
+    interface J extends I {
+        boolean v = TestInterfaceOrder.out(J.class);
+        default void j() {}
+    }
+
+    static class A implements J {
+        static boolean v = TestInterfaceOrder.out(A.class);
+    }
+
+    interface K extends I {
+        boolean v = TestInterfaceOrder.out(K.class);
+        default void k() {}
+    }
+
+    static class B extends A implements K {
+        static boolean v = TestInterfaceOrder.out(B.class);
+    }
+
+    interface L {
+        boolean v = TestInterfaceOrder.out(L.class);
+        default void l() {}
+    }
+
+    static class C extends B implements L {
+        static boolean v = TestInterfaceOrder.out(C.class);
+    }
+
+
+    static boolean out(Class c) {
+        System.out.println("#: initializing " + c.getName());
+        cInitOrder.add(c);
+        return true;
+    }
+
+}
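Note: the expected order above follows the Java 8 initialization rule (JLS 12.4.2): when a class is initialized, its superclass is initialized first, then its superinterfaces that declare at least one default method, most general first, and superinterfaces without default methods are skipped entirely. A minimal sketch of the skip rule, with made-up names for illustration:

public class SkippedInterfaceSketch {
    interface WithDefault {
        boolean V = note("WithDefault");
        default void f() {}
    }

    interface Plain extends WithDefault {  // declares no default method
        boolean V = note("Plain");
    }

    static class Impl implements Plain {
        static boolean V = note("Impl");
    }

    static boolean note(String who) {
        System.out.println("initialized: " + who);
        return true;
    }

    public static void main(String[] args) {
        // Expected output: WithDefault, then Impl; Plain is never initialized
        // because it declares no default method.
        new Impl();
    }
}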
+ */ + public static boolean isCommercialBuild() throws Exception { + String buildType = getReleaseProperty("BUILD_TYPE","notFound"); + return buildType.equals("commercial"); + } + + + /** + * Return the value for property key, or defaultValue if no property not found. + * If present, double quotes are trimmed. + */ + public static String getReleaseProperty(String key, String defaultValue) throws Exception { + Properties properties = getReleaseProperties(); + String value = properties.getProperty(key, defaultValue); + return trimDoubleQuotes(value); + } + + /** + * Return the value for property key, or null if no property not found. + * If present, double quotes are trimmed. + */ + public static String getReleaseProperty(String key) throws Exception { + return getReleaseProperty(key, null); + } + + /** + * Get properties from the release file + */ + public static Properties getReleaseProperties() throws Exception { + Properties properties = new Properties(); + properties.load(new FileReader(getReleaseFile())); + return properties; + } + + /** + * Every JDK has a release file in its root. + * @return A handler to the release file. + */ + public static File getReleaseFile() throws Exception { + String jdkPath = getJDKRoot(); + File releaseFile = new File(jdkPath,"release"); + if ( ! releaseFile.canRead() ) { + throw new Exception("Release file is not readable, or it is absent: " + + releaseFile.getCanonicalPath()); + } + return releaseFile; + } + + /** + * Returns path to the JDK under test. + * This path is obtained through the test.jdk property, usually set by JTREG. + */ + public static String getJDKRoot() { + String jdkPath = System.getProperty("test.jdk"); + if (jdkPath == null) { + throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. " + + "When running test separately, set this property using '-Dtest.jdk=/path/to/jdk'."); + } + return jdkPath; + } + + /** + * Trim double quotes from the beginning and the end of the given string. + * @param original string to trim. + * @return a new trimmed string. + */ + public static String trimDoubleQuotes(String original) { + if (original == null) { return null; } + String trimmed = original.replaceAll("^\"+|\"+$", ""); + return trimmed; + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary/com/oracle/java/testlibrary/DynamicVMOption.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/testlibrary/com/oracle/java/testlibrary/DynamicVMOption.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.java.testlibrary;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import java.lang.management.ManagementFactory;
+
+/**
+ * A utility class to work with VM options that can be altered during
+ * execution.
+ *
+ * This class is a wrapper around {@code com.sun.management.VMOption}.
+ * It provides a more convenient interface to read and write the values.
+ *
+ */
+public class DynamicVMOption {
+
+    private final HotSpotDiagnosticMXBean mxBean;
+
+    /**
+     * VM option name, like "MinHeapFreeRatio".
+     */
+    public final String name;
+
+    /**
+     * Creates an instance of DynamicVMOption.
+     *
+     * @param name the VM option name
+     */
+    public DynamicVMOption(String name) {
+        this.name = name;
+        mxBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
+    }
+
+    /**
+     * Sets a new value for the option.
+     * Trying to set a value that is not applicable will cause an
+     * IllegalArgumentException. Behavior with null is undefined; most likely
+     * a NullPointerException will be thrown.
+     *
+     * @param newValue the value to be set
+     * @see #getValue()
+     * @throws IllegalArgumentException if newValue is not applicable to the option
+     */
+    public final void setValue(String newValue) {
+        mxBean.setVMOption(name, newValue);
+    }
+
+    /**
+     * Returns the value of the option.
+     *
+     * @return the current option value
+     * @see #setValue(java.lang.String)
+     */
+    public final String getValue() {
+        return mxBean.getVMOption(name).getValue();
+    }
+
+    /**
+     * Returns true if the option is writable, false otherwise.
+     *
+     * @return true if the option is writable, false otherwise
+     */
+    public final boolean isWriteable() {
+        return mxBean.getVMOption(name).isWriteable();
+    }
+
+    /**
+     * Checks if the given value is applicable for the option.
+     *
+     * This method tries to set the option to the new value. If no exception
+     * has been thrown the value is treated as valid.
+     *
+     * Calling this method will not change the option value. After an attempt
+     * to set a new value, the option will be restored to its previous value.
+     *
+     * @param value the value to verify
+     * @return true if the option could be set to the given value
+     */
+    public boolean isValidValue(String value) {
+        boolean isValid = true;
+        String oldValue = getValue();
+        try {
+            setValue(value);
+        } catch (NullPointerException e) {
+            if (value == null) {
+                isValid = false;
+            }
+        } catch (IllegalArgumentException e) {
+            isValid = false;
+        } finally {
+            setValue(oldValue);
+        }
+        return isValid;
+    }
+
+    /**
+     * Returns the value of the given VM option as String.
+     *
+     * This is a simple shortcut for {@code new DynamicVMOption(name).getValue()}
+     *
+     * @param name the name of VM option
+     * @return value as a string
+     * @see #getValue()
+     */
+    public static String getString(String name) {
+        return new DynamicVMOption(name).getValue();
+    }
+
+    /**
+     * Returns the value of the given option as int.
+     *
+     * @param name the name of VM option
+     * @return value parsed as integer
+     * @see #getString(java.lang.String)
+     *
+     */
+    public static int getInt(String name) {
+        return Integer.parseInt(getString(name));
+    }
+
+    /**
+     * Sets the VM option to a new value.
+     *
+     * This is a simple shortcut for {@code new DynamicVMOption(name).setValue(value)}
+     *
+     * @param name the name of VM option
+     * @param value the value to be set
+     * @see #setValue(java.lang.String)
+     */
+    public static void setString(String name, String value) {
+        new DynamicVMOption(name).setValue(value);
+    }
+
+    /**
+     * Sets the VM option value to a new integer value.
+ * + * @param name the name of VM option + * @param value the integer value to be set + * @see #setString(java.lang.String, java.lang.String) + */ + public static void setInt(String name, int value) { + new DynamicVMOption(name).setValue(Integer.toString(value)); + } + +} --- ./hotspot/test/testlibrary/com/oracle/java/testlibrary/DynamicVMOptionChecker.java Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,121 +0,0 @@ -/* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.java.testlibrary; - -import com.sun.management.HotSpotDiagnosticMXBean; -import com.sun.management.VMOption; -import java.lang.management.ManagementFactory; - -/** - * Simple class to check writeability, invalid and valid values for VMOption - */ -public class DynamicVMOptionChecker { - - /** - * Reads VM option from PlatformMXBean and parse it to integer value - * - * @param name of option - * @return parsed value - */ - public static int getIntValue(String name) { - - VMOption option = ManagementFactory. - getPlatformMXBean(HotSpotDiagnosticMXBean.class). - getVMOption(name); - - return Integer.parseInt(option.getValue()); - } - - /** - * Sets VM option value - * - * @param name of option - * @param value to set - */ - public static void setIntValue(String name, int value) { - ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class).setVMOption(name, Integer.toString(value)); - } - - /** - * Checks that VM option is dynamically writable - * - * @param name - * @throws RuntimeException if option if not writable - * @return always true - */ - public static boolean checkIsWritable(String name) { - VMOption option = ManagementFactory. - getPlatformMXBean(HotSpotDiagnosticMXBean.class). - getVMOption(name); - - if (!option.isWriteable()) { - throw new RuntimeException(name + " is not writable"); - } - - return true; - } - - /** - * Checks that value cannot be set - * - * @param name of flag - * @param value string representation of value to set - * @throws RuntimeException on error - when expected exception hasn't been thrown - */ - public static void checkInvalidValue(String name, String value) { - // should throw - try { - ManagementFactory. - getPlatformMXBean(HotSpotDiagnosticMXBean.class). 
- setVMOption(name, value); - - } catch (IllegalArgumentException e) { - return; - } - - throw new RuntimeException("Expected IllegalArgumentException was not thrown, " + name + "= " + value); - } - - /** - * Checks that value can be set - * - * @param name of flag to set - * @param value string representation of value to set - * @throws RuntimeException on error - when value in VM is not equal to origin - */ - public static void checkValidValue(String name, String value) { - ManagementFactory. - getPlatformMXBean(HotSpotDiagnosticMXBean.class). - setVMOption(name, value); - - VMOption option = ManagementFactory. - getPlatformMXBean(HotSpotDiagnosticMXBean.class). - getVMOption(name); - - if (!option.getValue().equals(value)) { - throw new RuntimeException("Actual value of " + name + " \"" + option.getValue() - + "\" not equal origin \"" + value + "\""); - } - } - -} --- ./hotspot/test/testlibrary/com/oracle/java/testlibrary/TestDynamicVMOption.java Mon Dec 08 12:28:35 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.java.testlibrary; - -/** - * Simple class to check writeability, invalid and valid values for concrete VMOption - */ -public class TestDynamicVMOption { - - private final String name; - private final int value; - - /** - * Constructor - * - * @param name of VM option to test - */ - public TestDynamicVMOption(String name) { - this.name = name; - this.value = DynamicVMOptionChecker.getIntValue(name); - System.out.println(this.name + " = " + this.value); - } - - /** - * Checks that this value can accept valid percentage values and cannot accept invalid percentage values - * - * @throws RuntimeException - */ - public void testPercentageValues() { - checkInvalidValue(Integer.toString(Integer.MIN_VALUE)); - checkInvalidValue(Integer.toString(Integer.MAX_VALUE)); - checkInvalidValue("-10"); - checkInvalidValue("190"); - } - - /** - * Reads VM option from PlatformMXBean and parse it to integer value - * - * @return value - */ - public int getIntValue() { - return DynamicVMOptionChecker.getIntValue(this.name); - } - - /** - * Sets VM option value - * - * @param value to set - */ - public void setIntValue(int value) { - DynamicVMOptionChecker.setIntValue(this.name, value); - } - - /** - * Checks that this VM option is dynamically writable - * - * @throws RuntimeException if option if not writable - * @return true - */ - public boolean checkIsWritable() throws RuntimeException { - return DynamicVMOptionChecker.checkIsWritable(this.name); - } - - /** - * Checks that value for this VM option cannot be set - * - * @param value to check - * @throws RuntimeException on error - when expected exception hasn't been thrown - */ - public void checkInvalidValue(String value) { - DynamicVMOptionChecker.checkInvalidValue(this.name, value); - } - - /** - * Checks that value for this VM option can be set - * - * @param value to check - * @throws RuntimeException on error - when value in VM is not equal to origin - */ - public void checkValidValue(String value) { - DynamicVMOptionChecker.checkValidValue(this.name, value); - } - -} --- ./hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Mon Dec 08 12:28:35 2014 -0800 +++ ./hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Wed Feb 04 12:14:39 2015 -0800 @@ -25,7 +25,13 @@ package sun.hotspot; import java.lang.reflect.Executable; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Stream; import java.security.BasicPermission; +import java.net.URL; + import sun.hotspot.parser.DiagnosticCommand; public class WhiteBox { @@ -69,6 +75,8 @@ // Memory public native long getObjectAddress(Object o); public native int getHeapOopSize(); + public native boolean isObjectInOldGen(Object o); + public native long getObjectSize(Object o); // Runtime // Make sure class name is in the correct format @@ -77,6 +85,15 @@ } private native boolean isClassAlive0(String name); + // Resource/Class Lookup Cache + public native boolean classKnownToNotExist(ClassLoader loader, String name); + public native URL[] getLookupCacheURLs(ClassLoader loader); + public native int[] getLookupCacheMatches(ClassLoader loader, String name); + + // JVMTI + public native void addToBootstrapClassLoaderSearch(String segment); + public native void addToSystemClassLoaderSearch(String segment); + // G1 public native boolean g1InConcurrentMark(); public native boolean g1IsHumongous(Object o); @@ -91,8 +108,10 @@ public native void NMTCommitMemory(long addr, long size); public native void NMTUncommitMemory(long addr, 
long size);
   public native void NMTReleaseMemory(long addr, long size);
-  public native boolean NMTWaitForDataMerge();
+  public native long NMTMallocWithPseudoStack(long size, int index);
   public native boolean NMTIsDetailSupported();
+  public native boolean NMTChangeTrackingLevel();
+  public native int NMTGetHashSize();
 
   // Compiler
   public native void deoptimizeAll();
@@ -129,7 +148,7 @@
   }
   public native int getCompileQueueSize(int compLevel);
   public native boolean testSetForceInlineMethod(Executable method, boolean value);
-  public boolean enqueueMethodForCompilation(Executable method, int compLevel) { 
+  public boolean enqueueMethodForCompilation(Executable method, int compLevel) {
     return enqueueMethodForCompilation(method, compLevel, -1 /*InvocationEntryBci*/);
   }
   public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
@@ -142,6 +161,13 @@
 
   // Memory
   public native void readReservedMemory();
+  public native long allocateMetaspace(ClassLoader classLoader, long size);
+  public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
+  public native long incMetaspaceCapacityUntilGC(long increment);
+  public native long metaspaceCapacityUntilGC();
+
+  // force Young GC
+  public native void youngGC();
 
   // force Full GC
   public native void fullGC();
@@ -150,8 +176,49 @@
   public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
   public native void runMemoryUnitTests();
   public native void readFromNoaccessArea();
+  public native long getThreadStackSize();
+  public native long getThreadRemainingStackSize();
 
   // CPU features
   public native String getCPUFeatures();
 
+  // Native extensions
+  public native long getHeapUsageForContext(int context);
+  public native long getHeapRegionCountForContext(int context);
+  public native int getContextForObject(Object obj);
+  public native void printRegionInfo(int context);
+
+  // VM flags
+  public native void setBooleanVMFlag(String name, boolean value);
+  public native void setIntxVMFlag(String name, long value);
+  public native void setUintxVMFlag(String name, long value);
+  public native void setUint64VMFlag(String name, long value);
+  public native void setStringVMFlag(String name, String value);
+  public native void setDoubleVMFlag(String name, double value);
+  public native Boolean getBooleanVMFlag(String name);
+  public native Long getIntxVMFlag(String name);
+  public native Long getUintxVMFlag(String name);
+  public native Long getUint64VMFlag(String name);
+  public native String getStringVMFlag(String name);
+  public native Double getDoubleVMFlag(String name);
+  private final List<Function<String,Object>> flagsGetters = Arrays.asList(
+    this::getBooleanVMFlag, this::getIntxVMFlag, this::getUintxVMFlag,
+    this::getUint64VMFlag, this::getStringVMFlag, this::getDoubleVMFlag);
+
+  public Object getVMFlag(String name) {
+    return flagsGetters.stream()
+                       .map(f -> f.apply(name))
+                       .filter(x -> x != null)
+                       .findAny()
+                       .orElse(null);
+  }
+
+  public native int getOffsetForName0(String name);
+  public int getOffsetForName(String name) throws Exception {
+    int offset = getOffsetForName0(name);
+    if (offset == -1) {
+      throw new RuntimeException(name + " not found");
+    }
+    return offset;
+  }
+
 }
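Note: the getVMFlag helper added above is a small "first non-null wins" chain over typed accessors. The same idiom, reduced to a standalone sketch with ordinary parsers in place of the native WhiteBox getters (all names here are illustrative, not part of the patch):

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class FirstNonNullSketch {
    static Long asLong(String s)     { try { return Long.valueOf(s); }   catch (NumberFormatException e) { return null; } }
    static Double asDouble(String s) { try { return Double.valueOf(s); } catch (NumberFormatException e) { return null; } }

    private static final List<Function<String, Object>> PARSERS =
            Arrays.asList(FirstNonNullSketch::asLong, FirstNonNullSketch::asDouble);

    static Object parse(String s) {
        return PARSERS.stream()
                      .map(f -> f.apply(s))
                      .filter(x -> x != null)
                      .findAny()
                      .orElse(null);
    }

    public static void main(String[] args) {
        System.out.println(parse("42"));   // 42 (matched by asLong)
        System.out.println(parse("2.5"));  // 2.5 (asLong returns null, asDouble matches)
        System.out.println(parse("abc"));  // null: no parser matched
    }
}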
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test BooleanTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build BooleanTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BooleanTest
+ * @summary testing of WB::set/getBooleanVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+import sun.hotspot.WhiteBox;
+import com.oracle.java.testlibrary.*;
+import sun.management.*;
+import com.sun.management.*;
+
+public class BooleanTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final Boolean[] TESTS = {true, false, true, true, false};
+    private static final String TEST_NAME = "BooleanTest";
+    private static final String FLAG_NAME = "PrintCompilation";
+    private static final String METHOD = TEST_NAME + "::method";
+    private static final String METHOD1 = METHOD + "1";
+    private static final String METHOD2 = METHOD + "2";
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0) {
+            VmFlagTest.runTest(FLAG_NAME, TESTS,
+                VmFlagTest.WHITE_BOX::setBooleanVMFlag,
+                VmFlagTest.WHITE_BOX::getBooleanVMFlag);
+            testFunctional(false);
+            testFunctional(true);
+        } else {
+            boolean value = Boolean.valueOf(args[0]);
+            method1();
+            VmFlagTest.WHITE_BOX.setBooleanVMFlag(FLAG_NAME, value);
+            method2();
+        }
+    }
+
+    private static void testFunctional(boolean value) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-Xbootclasspath/a:.",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+WhiteBoxAPI",
+            "-Xcomp",
+            "-XX:CompileCommand=compileonly," + METHOD + "*",
+            "-XX:" + (value ? "-" : "+") + FLAG_NAME,
+            TEST_NAME,
+            "" + value);
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        if (value) {
+            out.shouldNotContain(METHOD1);
+            out.shouldContain(METHOD2);
+        } else {
+            out.shouldContain(METHOD1);
+            out.shouldNotContain(METHOD2);
+        }
+    }
+
+    private static void method1() { }
+    private static void method2() { }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java Wed Feb 04 12:14:39 2015 -0800
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test DoubleTest + * @bug 8028756 + * @library /testlibrary /testlibrary/whitebox + * @build DoubleTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DoubleTest + * @summary testing of WB::set/getDoubleVMFlag() + * @author igor.ignatyev@oracle.com + */ + +public class DoubleTest { + private static final String FLAG_NAME = null; + private static final Double[] TESTS = {0d, -0d, -1d, 1d, + Double.MAX_VALUE, Double.MIN_VALUE, Double.NaN, + Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY}; + + public static void main(String[] args) throws Exception { + VmFlagTest.runTest(FLAG_NAME, TESTS, + VmFlagTest.WHITE_BOX::setDoubleVMFlag, + VmFlagTest.WHITE_BOX::getDoubleVMFlag); + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test IntxTest + * @bug 8028756 + * @library /testlibrary /testlibrary/whitebox + * @build IntxTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IntxTest + * @summary testing of WB::set/getIntxVMFlag() + * @author igor.ignatyev@oracle.com + */ + +public class IntxTest { + private static final String FLAG_NAME = "OnStackReplacePercentage"; + private static final Long[] TESTS = {0L, 100L, -1L, + (long) Integer.MAX_VALUE, (long) Integer.MIN_VALUE}; + + public static void main(String[] args) throws Exception { + VmFlagTest.runTest(FLAG_NAME, TESTS, + VmFlagTest.WHITE_BOX::setIntxVMFlag, + VmFlagTest.WHITE_BOX::getIntxVMFlag); + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/StringTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test StringTest + * @bug 8028756 + * @library /testlibrary /testlibrary/whitebox + * @build StringTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StringTest + * @summary testing of WB::set/getStringVMFlag() + * @author igor.ignatyev@oracle.com + */ + +public class StringTest { + private static final String FLAG_NAME = "CompileOnly"; + private static final String[] TESTS = {"StringTest::*", ""}; + + public static void main(String[] args) throws Exception { + VmFlagTest.runTest(FLAG_NAME, TESTS, + VmFlagTest.WHITE_BOX::setStringVMFlag, + VmFlagTest.WHITE_BOX::getStringVMFlag); + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/Uint64Test.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test Uint64Test + * @bug 8028756 + * @library /testlibrary /testlibrary/whitebox + * @build Uint64Test + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI Uint64Test + * @summary testing of WB::set/getUint64VMFlag() + * @author igor.ignatyev@oracle.com + */ + +public class Uint64Test { + private static final String FLAG_NAME = "MaxRAM"; + private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE, + -1L, Long.MAX_VALUE, Long.MIN_VALUE}; + + public static void main(String[] args) throws Exception { + VmFlagTest.runTest(FLAG_NAME, TESTS, + VmFlagTest.WHITE_BOX::setUint64VMFlag, + VmFlagTest.WHITE_BOX::getUint64VMFlag); + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test UintxTest + * @bug 8028756 + * @library /testlibrary /testlibrary/whitebox + * @build UintxTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI UintxTest + * @summary testing of WB::set/getUintxVMFlag() + * @author igor.ignatyev@oracle.com + */ +import com.oracle.java.testlibrary.Platform; + +public class UintxTest { + private static final String FLAG_NAME = "TypeProfileLevel"; + private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE, + (1L << 32L) - 1L, 1L << 32L}; + private static final Long[] EXPECTED_64 = TESTS; + private static final Long[] EXPECTED_32 = {0L, 100L, + (long) Integer.MAX_VALUE, (1L << 32L) - 1L, 0L}; + + public static void main(String[] args) throws Exception { + VmFlagTest.runTest(FLAG_NAME, TESTS, + Platform.is64bit() ? EXPECTED_64 : EXPECTED_32, + VmFlagTest.WHITE_BOX::setUintxVMFlag, + VmFlagTest.WHITE_BOX::getUintxVMFlag); + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./hotspot/test/testlibrary_tests/whitebox/vm_flags/VmFlagTest.java Wed Feb 04 12:14:39 2015 -0800 @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.util.Objects; +import java.util.function.BiConsumer; +import java.util.function.Function; +import sun.hotspot.WhiteBox; +import sun.management.*; +import com.sun.management.*; +import com.oracle.java.testlibrary.*; + +public final class VmFlagTest<T> { + public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + + private static final String NONEXISTENT_FLAG = "NonexistentFlag"; + private final String flagName; + private final BiConsumer<T, T> test; + private final BiConsumer<String, T> set; + private final Function<String, T> get; + + protected VmFlagTest(String flagName, BiConsumer<String, T> set, + Function<String, T> get, boolean isPositive) { + this.flagName = flagName; + this.set = set; + this.get = get; + if (isPositive) { + test = this::testPositive; + } else { + test = this::testNegative; + } + } + + private void setNewValue(T value) { + set.accept(flagName, value); + } + + private T getValue() { + T t = get.apply(flagName); + System.out.println("T = " + t); + return t; + } + + protected static <T> void runTest(String existentFlag, T[] tests, + BiConsumer<String, T> set, Function<String, T> get) { + runTest(existentFlag, tests, tests, set, get); + } + + protected static <T> void runTest(String existentFlag, T[] tests, + T[] results, BiConsumer<String, T> set, Function<String, T> get) { + if (existentFlag != null) { + new VmFlagTest<>(existentFlag, set, get, true).test(tests, results); + } + new VmFlagTest<>(NONEXISTENT_FLAG, set, get, false).test(tests, results); + } + + public final void test(T[] tests, T[] results) { + Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length"); + for (int i = 0, n = tests.length ; i < n; ++i) { + test.accept(tests[i], results[i]); + } + } + + protected String getVMOptionAsString() { + HotSpotDiagnosticMXBean diagnostic + = ManagementFactoryHelper.getDiagnosticMXBean(); + VMOption tmp; + try { + tmp = diagnostic.getVMOption(flagName); + } catch (IllegalArgumentException e) { + tmp = null; + } + return tmp == null ? null : tmp.getValue(); + } + + private void testPositive(T value, T expected) { + Asserts.assertEQ(getVMOptionAsString(), asString(getValue())); + setNewValue(value); + String newValue = getVMOptionAsString(); + Asserts.assertEQ(newValue, asString(expected)); + Asserts.assertEQ(getVMOptionAsString(), asString(getValue())); + } + + private void testNegative(T value, T expected) { + String oldValue = getVMOptionAsString(); + Asserts.assertEQ(oldValue, asString(getValue())); + setNewValue(value); + String newValue = getVMOptionAsString(); + Asserts.assertEQ(oldValue, newValue); + } + + private String asString(Object value) { + return value == null ?
null : "" + value; + } +} + --- ./jaxp/.hgtags Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/.hgtags Wed Feb 04 12:14:41 2015 -0800 @@ -349,3 +349,29 @@ b5165ac3556e95c42a295d3cbeef8cd6e5607b25 jdk8u31-b10 f475dbc70345904bda6b520af43955e244292886 jdk8u31-b11 6563e438377f2086253577e08593b1ddfb901eff jdk8u31-b12 +1dd828fd98f1b84de5dcadb904322b711e7489ff jdk8u31-b13 +3a1bba8076da4e54882123e98e219eab1c31ccef jdk8u40-b00 +f219da378d0768ff042d77221e5d20676ecc16f0 jdk8u40-b01 +16ef2134c32a4e60b5a60105b371163aa5936278 jdk8u40-b02 +d73144ee471d4d4c49db94b3cc8990479961499a jdk8u40-b03 +23c1a41d75adc6fc57f69528c2c804079f449d94 jdk8u40-b04 +8d60cebf6a0c1cf1a5669e49deb795a1445e826b jdk8u40-b05 +50a2adfa57fcbbe1587991d53f58d227070a24db jdk8u40-b06 +b2210de1587b16ca1d1d781b55e19bc70724c62b jdk8u40-b07 +641eb6543c7186d1c2c26eaf96803b1e6b411d9a jdk8u40-b08 +c45c0ee4160026207cef1850c215c2691d43a97d jdk8u40-b09 +1f5248bc0714a45160525d48bfcf455b9cdf86a3 jdk8u40-b10 +0bf6dbb6ac4e2258d4d832bb949cb1be2402a899 jdk8u40-b11 +b0cde44bdbfd6339128ca5a7dbbbffaaecac793a jdk8u40-b12 +9df027c40f0fe150f609f1a5e731b7e649923f2b jdk8u40-b13 +d8886dede94dedcad66e6d770dac6b8bca1eb51d jdk8u40-b14 +ed46c05af2570feed5ea264eca02f223055109e9 jdk8u40-b15 +cda81fbeec94b31dba27a043fbf58f442524cdf5 jdk8u40-b16 +cb63029168a52d62d82c3325f1092405c318e78c jdk8u40-b17 +6103f5a8119a85937ae006f18b8dfc04f73315d0 jdk8u40-b18 +3b73732d6886dc8155f0c1fbb125ca60d9e2fd2b jdk8u40-b19 +7bfc889330e0ec1fd495990eaa0d7f0c390b7304 jdk8u40-b20 +78d90db9de2801eec010ccb9f0db3caf969dfc3b jdk8u40-b21 +54a13451ce243f2159ed3996e6efcf374a5750ca jdk8u40-b22 +e07fbae1efeac4e50514384caa7d226af7414114 jdk8u40-b23 +048cebd17f73f23ce2295e360f31c1b6788195aa jdk8u40-b24 --- ./jaxp/THIRD_PARTY_README Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/THIRD_PARTY_README Wed Feb 04 12:14:41 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8. 
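The VmFlagTest harness above pairs each typed WhiteBox setter with its getter, runs the positive cases against a real flag and the negative cases against NonexistentFlag, and cross-checks every observed value through HotSpotDiagnosticMXBean. A minimal standalone sketch of the round trip it automates; the flag and values are illustrative, and the program assumes the same launch flags the tests use (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI):

    import sun.hotspot.WhiteBox;

    public class FlagRoundTrip {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // getIntxVMFlag returns the current value, or null when the VM does
            // not define the flag -- the "negative" path VmFlagTest exercises.
            Long before = wb.getIntxVMFlag("OnStackReplacePercentage");
            System.out.println("before = " + before);
            // Set a new value and read it back; VmFlagTest additionally verifies
            // the same value through HotSpotDiagnosticMXBean.getVMOption().
            wb.setIntxVMFlag("OnStackReplacePercentage", 100L);
            System.out.println("after = " + wb.getIntxVMFlag("OnStackReplacePercentage"));
        }
    }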
Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- ./jaxp/src/com/sun/org/apache/bcel/internal/classfile/DescendingVisitor.java Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/com/sun/org/apache/bcel/internal/classfile/DescendingVisitor.java Wed Feb 04 12:14:41 2015 -0800 @@ -213,6 +213,10 @@ public void visitLocalVariableTypeTable(LocalVariableTypeTable obj) { stack.push(obj); obj.accept(visitor); + + LocalVariable[] vars = obj.getLocalVariableTypeTable(); + for(int i=0; i < vars.length; i++) + vars[i].accept(this); stack.pop(); } --- ./jaxp/src/com/sun/org/apache/bcel/internal/generic/MethodGen.java Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/com/sun/org/apache/bcel/internal/generic/MethodGen.java Wed Feb 04 12:14:41 2015 -0800 @@ -87,6 +87,7 @@ private boolean strip_attributes; private ArrayList variable_vec = new ArrayList(); + private ArrayList type_vec = new ArrayList(); private ArrayList line_number_vec = new ArrayList(); private ArrayList exception_vec = new ArrayList(); private ArrayList throws_vec = new ArrayList(); @@ -260,7 +261,7 @@ } } else if (a instanceof LocalVariableTypeTable) { LocalVariable[] lv = ((LocalVariableTypeTable) a).getLocalVariableTypeTable(); - removeLocalVariables(); + removeLocalVariableTypes(); for (int k = 0; k < lv.length; k++) { LocalVariable l = lv[k]; InstructionHandle start = il.findHandle(l.getStartPC()); @@ -272,7 +273,7 @@ if (null == end) { end = il.getEnd(); } - addLocalVariable(l.getName(), Type.getType(l.getSignature()), l + addLocalVariableType(l.getName(), Type.getType(l.getSignature()), l .getIndex(), start, end); } } else @@ -406,6 +407,31 @@ return lg; } + /* + * If the range of the variable has not been set yet, it will be set to be + * valid from the start to the end of the instruction list. + * + * @return array of declared local variable types sorted by index + */ + private LocalVariableGen[] getLocalVariableTypes() { + int size = type_vec.size(); + LocalVariableGen[] lg = new LocalVariableGen[size]; + type_vec.toArray(lg); + + for(int i=0; i < size; i++) { + if(lg[i].getStart() == null) + lg[i].setStart(il.getStart()); + + if(lg[i].getEnd() == null) + lg[i].setEnd(il.getEnd()); + } + + if(size > 1) + sort(lg, 0, size - 1); + + return lg; + } + /** * @return `LocalVariableTable' attribute of all the local variables of this method. */ @@ -422,6 +448,68 @@ } /** + * @return `LocalVariableTypeTable' attribute of all the local variable + * types of this method. + */ + public LocalVariableTypeTable getLocalVariableTypeTable(ConstantPoolGen cp) { + LocalVariableGen[] lg = getLocalVariableTypes(); + int size = lg.length; + LocalVariable[] lv = new LocalVariable[size]; + + for(int i=0; i < size; i++) + lv[i] = lg[i].getLocalVariable(cp); + + return new LocalVariableTypeTable(cp.addUtf8("LocalVariableTypeTable"), + 2 + lv.length * 10, lv, cp.getConstantPool()); + } + + /** + * Adds a local variable type to this method.
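Taken together, the DescendingVisitor and MethodGen changes above let a LocalVariableTypeTable survive a decompile/regenerate round trip instead of being dropped or folded into the LocalVariableTable. A sketch of the scenario the fix addresses, assuming the internal copy mirrors the public Apache BCEL 5.x API; the class name Example is illustrative:

    import com.sun.org.apache.bcel.internal.Repository;
    import com.sun.org.apache.bcel.internal.classfile.JavaClass;
    import com.sun.org.apache.bcel.internal.classfile.Method;
    import com.sun.org.apache.bcel.internal.generic.ClassGen;
    import com.sun.org.apache.bcel.internal.generic.ConstantPoolGen;
    import com.sun.org.apache.bcel.internal.generic.MethodGen;

    public class LvttRoundTrip {
        public static void main(String[] args) throws Exception {
            // Any class whose methods declare generic locals carries a
            // LocalVariableTypeTable next to its LocalVariableTable.
            // lookupClass may return null if Example is not on the classpath.
            JavaClass clazz = Repository.lookupClass("Example");
            ClassGen cg = new ClassGen(clazz);
            ConstantPoolGen cp = cg.getConstantPool();
            for (Method m : clazz.getMethods()) {
                MethodGen mg = new MethodGen(m, cg.getClassName(), cp);
                // Before the fix, regenerating the method discarded the type
                // table; with type_vec it is rebuilt inside getMethod().
                cg.replaceMethod(m, mg.getMethod());
            }
        }
    }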
+ * + * @param name variable name + * @param type variable type + * @param slot the index of the local variable, if type is long or double, the next available + * index is slot+2 + * @param start from where the variable is valid + * @param end until where the variable is valid + * @return new local variable object + * @see LocalVariable + */ + private LocalVariableGen addLocalVariableType(String name, Type type, int slot, + InstructionHandle start, + InstructionHandle end) { + byte t = type.getType(); + + if(t != Constants.T_ADDRESS) { + int add = type.getSize(); + + if(slot + add > max_locals) + max_locals = slot + add; + + LocalVariableGen l = new LocalVariableGen(slot, name, type, start, end); + int i; + + if((i = type_vec.indexOf(l)) >= 0) // Overwrite if necessary + type_vec.set(i, l); + else + type_vec.add(l); + + return l; + } else { + throw new IllegalArgumentException("Can not use " + type + + " as type for local variable"); + + } + } + + /** + * Remove all local variable types. + */ + private void removeLocalVariableTypes() { + type_vec.clear(); + } + + /** * Give an instruction a line number corresponding to the source code line. * * @param ih instruction to tag @@ -637,12 +725,17 @@ LineNumberTable lnt = null; LocalVariableTable lvt = null; + LocalVariableTypeTable lvtt = null; - /* Create LocalVariableTable and LineNumberTable attributes (for debuggers, e.g.) + /* Create LocalVariableTable, LocalVariableTypeTable, and LineNumberTable + * attributes (for debuggers, e.g.) */ if((variable_vec.size() > 0) && !strip_attributes) addCodeAttribute(lvt = getLocalVariableTable(cp)); + if((type_vec.size() > 0) && !strip_attributes) + addCodeAttribute(lvtt = getLocalVariableTypeTable(cp)); + if((line_number_vec.size() > 0) && !strip_attributes) addCodeAttribute(lnt = getLineNumberTable(cp)); @@ -691,6 +784,7 @@ // Undo effects of adding attributes if(lvt != null) removeCodeAttribute(lvt); + if(lvtt != null) removeCodeAttribute(lvtt); if(lnt != null) removeCodeAttribute(lnt); if(code != null) removeAttribute(code); if(et != null) removeAttribute(et); --- ./jaxp/src/com/sun/org/apache/xerces/internal/jaxp/validation/XMLSchemaFactory.java Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/com/sun/org/apache/xerces/internal/jaxp/validation/XMLSchemaFactory.java Wed Feb 04 12:14:41 2015 -0800 @@ -453,7 +453,8 @@ } private void propagateFeatures(AbstractXMLSchema schema) { - schema.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, fSecurityManager != null); + schema.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, + (fSecurityManager != null && fSecurityManager.isSecureProcessing())); schema.setFeature(Constants.ORACLE_FEATURE_SERVICE_MECHANISM, fUseServicesMechanism); String[] features = fXMLSchemaLoader.getRecognizedFeatures(); for (int i = 0; i < features.length; ++i) { --- ./jaxp/src/com/sun/org/apache/xml/internal/dtm/ref/sax2dtm/SAX2DTM2.java Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/com/sun/org/apache/xml/internal/dtm/ref/sax2dtm/SAX2DTM2.java Wed Feb 04 12:14:41 2015 -0800 @@ -3145,7 +3145,11 @@ m_data.elementAt(-dataIndex+1)); } } - else if (DTM.ELEMENT_NODE == type || DTM.DOCUMENT_FRAGMENT_NODE == type + else if (DTM.ELEMENT_NODE == type) + { + return getStringValueX(nodeHandle); + } + else if (DTM.DOCUMENT_FRAGMENT_NODE == type || DTM.DOCUMENT_NODE == type) { return null; --- ./jaxp/src/com/sun/org/apache/xml/internal/resolver/readers/DOMCatalogReader.java Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/com/sun/org/apache/xml/internal/resolver/readers/DOMCatalogReader.java Wed Feb 04
12:14:41 2015 -0800 @@ -57,7 +57,6 @@ *
<p>The selection of CatalogParsers is made on the basis of the QName * of the root element of the document.</p> * - * <p>This class requires the Java API for XML Parsing.</p>
* * @see Catalog * @see CatalogReader --- ./jaxp/src/org/w3c/dom/package.html Mon Dec 08 12:29:23 2014 -0800 +++ ./jaxp/src/org/w3c/dom/package.html Wed Feb 04 12:14:41 2015 -0800 @@ -3,12 +3,10 @@ org.w3c.dom package -Provides the interfaces for the Document Object Model (DOM) which is a -component API of the Java API for XML -Processing. The Document Object Model Level 2 Core API allows programs -to dynamically access and update the content and structure of documents. -See the specification -for more information. +Provides the interfaces for the Document Object Model (DOM). Supports the +Document Object Model Level 2 Core API, +Document Object Model (DOM) Level 3 Core, +and Document Object Model (DOM) Level 3 Load and Save. @since JDK1.4 --- ./jaxws/.hgtags Mon Dec 08 12:29:31 2014 -0800 +++ ./jaxws/.hgtags Wed Feb 04 12:14:42 2015 -0800 @@ -347,3 +347,29 @@ dd0467f3fe130884849ad8fb226d76f02b4cbde4 jdk8u31-b10 497c783d228ed188d61964edd409794af3ad3e5c jdk8u31-b11 959e8fca46155528c8147da69a7c49edfb002cb1 jdk8u31-b12 +9d0c737694ece23547c0a27dcd0ba6cbcdf577f2 jdk8u31-b13 +31d43d250c836c13fcc87025837783788c5cd0de jdk8u40-b00 +262fb5353ffa661f88b4a9cf2581fcad8c2a43f7 jdk8u40-b01 +8043f77ef8a4ded9505269a356c4e2f4f9604cd9 jdk8u40-b02 +27bcab54f36afc2340adf525fa2a8de1b1c356ca jdk8u40-b03 +127fb2a65ca6d09bb59ff8030aac11c67f3da4d8 jdk8u40-b04 +b904fcd66860c2b069493f989b3347241266407d jdk8u40-b05 +52ae3094de1ec6974ff0250b8cabc13f110290d1 jdk8u40-b06 +3857b4b27e22cfd3f970cd77ed1a41d44444202f jdk8u40-b07 +304ea93428f83d55a558a76ebbcc318d07c03fbe jdk8u40-b08 +26529be4ae77192acf99c867f0c2a75a7ad71f28 jdk8u40-b09 +337fb10bc4da77f31b3ba3049d45180a8c215f97 jdk8u40-b10 +475f12001625b16230f29a96b6371b3cd2e955dd jdk8u40-b11 +d78fb9203a2782842810ff0197f2ce92d364a8ea jdk8u40-b12 +42a61f4bdca3f7f919b7f8aeb5cad961dc2d1660 jdk8u40-b13 +d22a374ce8569a89301b6c07301e0a803af024e7 jdk8u40-b14 +a278e39ba58d104d0731db5ab5ea6cedf3084a1d jdk8u40-b15 +8893690584dac2df7cc2e9b0a5ffe107266a160b jdk8u40-b16 +83c4d5aca2ff8fd0c6b2a7091018b71313371176 jdk8u40-b17 +fa07311627d085f1307f55298f59463bcf55db02 jdk8u40-b18 +c8b402c28fe51e25f3298e1266f2ae48bda8d3e0 jdk8u40-b19 +a21c4edfdf4402f027183ac8c8aac2db49df3b7d jdk8u40-b20 +16485a38b6bc762b363f4e439047486742fbcfcb jdk8u40-b21 +6e928fd9152541eddf25694be89eb881434a5c5f jdk8u40-b22 +b6755a463ccf6a79b1e1a43ed7bdb1c5cb1ac17d jdk8u40-b23 +5fbbfd66643edb81cfa0688825d698dcc5f2eb11 jdk8u40-b24 --- ./jaxws/THIRD_PARTY_README Mon Dec 08 12:29:31 2014 -0800 +++ ./jaxws/THIRD_PARTY_README Wed Feb 04 12:14:42 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8.
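The reworked org.w3c.dom package description above now advertises DOM Level 3 Core and Load and Save support alongside the Level 2 Core API. A minimal illustration of the Load and Save feature it refers to, using only standard platform API (not part of this patch):

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.ls.DOMImplementationLS;
    import org.w3c.dom.ls.LSSerializer;

    public class LoadSaveDemo {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().newDocument();
            doc.appendChild(doc.createElement("root"));
            // DOM Level 3 Load and Save: the "LS" feature is requested from the
            // DOMImplementation rather than instantiated directly.
            DOMImplementationLS ls = (DOMImplementationLS)
                    doc.getImplementation().getFeature("LS", "3.0");
            LSSerializer serializer = ls.createLSSerializer();
            System.out.println(serializer.writeToString(doc));
        }
    }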
Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- ./jdk/.hgtags Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/.hgtags Wed Feb 04 12:14:43 2015 -0800 @@ -350,3 +350,29 @@ 291505d802d9075e227f9ee865a67234e1d737cf jdk8u31-b10 a21dd7999d1e4ba612c951c2c78504d23eb7243a jdk8u31-b11 6a12f34816d2ee12368274fc21225384a8893426 jdk8u31-b12 +1fbdd5d80d0671decd8acb5adb64866f609e986f jdk8u31-b13 +e6ed015afbbf3459ba3297e270b4f3170e989c80 jdk8u40-b00 +6e223d48080ef40f4ec11ecbcd19b4a20813b9eb jdk8u40-b01 +4797cd0713b44b009525f1276d571ade7e24f3f5 jdk8u40-b02 +c67acfb24eed87629887128df51007218ddf1f60 jdk8u40-b03 +dde62d949f7847469b2ede2ca4190c95066adc91 jdk8u40-b04 +d587834579dadd18cb8b096e61d92e2dbccc2782 jdk8u40-b05 +25788892a6723c0742a24050cc25ab103d9804de jdk8u40-b06 +07f0e22b5c238dd7b89fedbed35f02ac6b392c96 jdk8u40-b07 +0f0d70abca09b4ddb0981204ad5a427d4ce935e9 jdk8u40-b08 +064adeb65ce82f9ff3cc7898e59d19eb64743c63 jdk8u40-b09 +c3a4729c70fa29d79ad77e0643ad7715ebbc96b5 jdk8u40-b10 +693da296b395139f2fe6d7131eb0b0d85f6015f6 jdk8u40-b11 +fb8db13639204e37388904bb6e57778c5d762631 jdk8u40-b12 +ba80109a9b3eb92b56012c9ec3aafd9aee2efa69 jdk8u40-b13 +ffc348308de2e872f5d510d440604c3726a67a18 jdk8u40-b14 +31dac938108da722c56a0526fba7f6ae84773056 jdk8u40-b15 +9dc67d03e6e540f646f27092ed23e94e95fa789e jdk8u40-b16 +fc4f5546417071c70cffd89ca83302309f6f7da9 jdk8u40-b17 +20a3e2135e0867e55af72f0c66a3de558bc613e2 jdk8u40-b18 +5c31204d19e5976f025026db3d5c17331e8c44db jdk8u40-b19 +7784dab075ed82be2275f4694164bbb9cc1cde3f jdk8u40-b20 +564bca490631e4ed4f7993e6633ed9ee62067624 jdk8u40-b21 +d168113f9841a77b3cee3a6a45fcd85b7351ac90 jdk8u40-b22 +41fe61722ce96b75dd3a1ba5072473122e21e5a0 jdk8u40-b23 +9d903721276c8684706db7ecfb6cda568e9f4f69 jdk8u40-b24 --- ./jdk/THIRD_PARTY_README Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/THIRD_PARTY_README Wed Feb 04 12:14:43 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8. Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- ./jdk/make/CompileDemos.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/CompileDemos.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -456,7 +456,7 @@ ################################################################################################## ifndef OPENJDK - DB_DEMO_ZIPFILE := $(wildcard $(JDK_TOPDIR)/src/closed/share/db/*.zip) + DB_DEMO_ZIPFILE := $(wildcard $(JDK_TOPDIR)/src/closed/share/db/db-derby-*-bin.zip) $(JDK_OUTPUTDIR)/demo/_the.db.unzipped: $(DB_DEMO_ZIPFILE) $(MKDIR) -p $(@D) --- ./jdk/make/CompileJavaClasses.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/CompileJavaClasses.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -37,6 +37,11 @@ EXCLUDES := ########################################################################################## +# Include the corresponding custom file, if present. + +-include $(CUSTOM_MAKE_DIR)/CompileJavaClasses.gmk + +########################################################################################## EXCLUDES += com/sun/pept \ com/sun/tools/example/trace \ @@ -52,8 +57,8 @@ endif ifndef OPENJDK - # There exists two versions of this file... - EXFILES := $(JDK_TOPDIR)/src/share/classes/javax/crypto/JarVerifier.java + # There exists two versions of these files... 
+ EXFILES += $(JDK_TOPDIR)/src/share/classes/javax/crypto/JarVerifier.java ifeq ($(OPENJDK_TARGET_OS), windows) # This gets built on unix platforms implicitly in the old build even though --- ./jdk/make/CompileNativeLibraries.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/CompileNativeLibraries.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -87,6 +87,9 @@ include lib/SoundLibraries.gmk +# Include the corresponding custom file, if present. +-include $(CUSTOM_MAKE_DIR)/CompileNativeLibraries.gmk + ########################################################################################## all: $(COPY_FILES) $(BUILD_LIBRARIES) --- ./jdk/make/Images.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/Images.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -562,7 +562,7 @@ $(CAT) $< | $(SED) "s/XXXX/$(shell cat $(JDK_TOPDIR)/src/closed/share/db/COPYRIGHTYEAR)/" > $@ JDK_DB_TARGETS := $(patsubst $(JDK_TOPDIR)/src/closed/share/db/%, $(IMAGES_OUTPUTDIR)/_unzip/%.unzipped, \ - $(wildcard $(JDK_TOPDIR)/src/closed/share/db/*.zip)) \ + $(wildcard $(JDK_TOPDIR)/src/closed/share/db/db-derby-*-bin.zip)) \ $(JDK_IMAGE_DIR)/db/README-JDK.html $(JDK_IMAGE_DIR)/db/3RDPARTY endif --- ./jdk/make/Setup.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/Setup.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ # boot jdk to generate tools that need to be run with the boot jdk. # Thus we force the target bytecode to the previous JDK version. $(eval $(call SetupJavaCompiler,GENERATE_OLDBYTECODE, \ - JVM := $(JAVA), \ + JVM := $(JAVA_SMALL), \ JAVAC := $(NEW_JAVAC), \ FLAGS := $(BOOT_JDK_SOURCETARGET) -bootclasspath $(BOOT_RTJAR) $(DISABLE_WARNINGS), \ SERVER_DIR := $(SJAVAC_SERVER_DIR), \ @@ -60,7 +60,7 @@ # I.e. the rt.jar, but since rt.jar has not yet been generated # (it will be in "make images") therefore we use classes instead. $(eval $(call SetupJavaCompiler,GENERATE_USINGJDKBYTECODE, \ - JVM := $(JAVA), \ + JVM := $(JAVA_SMALL), \ JAVAC := $(NEW_JAVAC), \ FLAGS := -bootclasspath $(JDK_OUTPUTDIR)/classes $(DISABLE_WARNINGS), \ SERVER_DIR := $(SJAVAC_SERVER_DIR), \ --- ./jdk/make/Tools.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/Tools.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -54,84 +54,85 @@ BUILD_TOOLS += $(JDK_OUTPUTDIR)/btclasses/build/tools/deps/refs.allowed # Add a checksum ("jsum") to the end of a text file. Prevents trivial tampering with class lists. -TOOL_ADDJSUM = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_ADDJSUM = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.addjsum.AddJsum # The buildmetaindex tool creates a meta-index to make core class loaders lazier. 
-TOOL_BUILDMETAINDEX = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_BUILDMETAINDEX = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.buildmetaindex.BuildMetaIndex -TOOL_COMPILEFONTCONFIG = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_COMPILEFONTCONFIG = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.compilefontconfig.CompileFontConfig -TOOL_COMPILEPROPERTIES = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_COMPILEPROPERTIES = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.compileproperties.CompileProperties -TOOL_STRIPPROPERTIES = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_STRIPPROPERTIES = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.stripproperties.StripProperties -TOOL_JARREORDER = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_JARREORDER = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.jarreorder.JarReorder -TOOL_GENERATECHARACTER = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_GENERATECHARACTER = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.generatecharacter.GenerateCharacter -TOOL_CHARACTERNAME = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_CHARACTERNAME = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.generatecharacter.CharacterName -TOOL_DTDBUILDER = $(JAVA) -Ddtd_home=$(JDK_TOPDIR)/make/data/dtdbuilder \ +TOOL_DTDBUILDER = $(JAVA_SMALL) -Ddtd_home=$(JDK_TOPDIR)/make/data/dtdbuilder \ + -Djava.awt.headless=true \ -cp $(JDK_OUTPUTDIR)/btclasses build.tools.dtdbuilder.DTDBuilder -TOOL_GENERATEBREAKITERATORDATA = $(JAVA) \ +TOOL_GENERATEBREAKITERATORDATA = $(JAVA_SMALL) \ -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.generatebreakiteratordata.GenerateBreakIteratorData -TOOL_GENERATECURRENCYDATA = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_GENERATECURRENCYDATA = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.generatecurrencydata.GenerateCurrencyData -TOOL_HASHER = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_HASHER = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.hasher.Hasher -TOOL_TZDB = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_TZDB = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.tzdb.TzdbZoneRulesCompiler # TODO: There are references to the jdwpgen.jar in jdk/make/netbeans/jdwpgen/build.xml # and nbproject/project.properties in the same dir. Needs to be looked at. -TOOL_JDWPGEN = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses build.tools.jdwpgen.Main +TOOL_JDWPGEN = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses build.tools.jdwpgen.Main # TODO: Lots of files in jdk/make/tools/CharsetMapping dir -TOOL_CHARSETMAPPING = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_CHARSETMAPPING = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.charsetmapping.Main -TOOL_SPP = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses build.tools.spp.Spp +TOOL_SPP = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses build.tools.spp.Spp # Nimbus is used somewhere in the swing build. 
-TOOL_GENERATENIMBUS = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_GENERATENIMBUS = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.generatenimbus.Generator -TOOL_WRAPPERGENERATOR = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_WRAPPERGENERATOR = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ WrapperGenerator -TOOL_AWT_TOBIN = $(JAVA) -Djava.awt.headless=true -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_AWT_TOBIN = $(JAVA_SMALL) -Djava.awt.headless=true -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.icondata.awt.ToBin -TOOL_OSX_TOBIN = $(JAVA) -Djava.awt.headless=true -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_OSX_TOBIN = $(JAVA_SMALL) -Djava.awt.headless=true -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.icondata.osxapp.ToBin -TOOL_CLDRCONVERTER = $(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ +TOOL_CLDRCONVERTER = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.cldrconverter.CLDRConverter -TOOL_REMOVEMETHODS = $(JAVA) -Xbootclasspath/p:$(LANGTOOLS_OUTPUTDIR)/dist/bootstrap/lib/javac.jar \ +TOOL_REMOVEMETHODS = $(JAVA_SMALL) -Xbootclasspath/p:$(LANGTOOLS_OUTPUTDIR)/dist/bootstrap/lib/javac.jar \ -cp $(JDK_OUTPUTDIR)/btclasses:$(JDK_OUTPUTDIR) \ build.tools.classfile.RemoveMethods -TOOL_CHECKDEPS = $(JAVA) -Xbootclasspath/p:$(LANGTOOLS_OUTPUTDIR)/dist/bootstrap/lib/javac.jar \ +TOOL_CHECKDEPS = $(JAVA_SMALL) -Xbootclasspath/p:$(LANGTOOLS_OUTPUTDIR)/dist/bootstrap/lib/javac.jar \ -cp $(JDK_OUTPUTDIR)/btclasses:$(JDK_OUTPUTDIR) \ build.tools.deps.CheckDeps -TOOL_ADDTORESTRICTEDPKGS=$(JAVA) -cp $(JDK_OUTPUTDIR)/btclasses \ - build.tools.addtorestrictedpkgs.AddToRestrictedPkgs +TOOL_ADDTORESTRICTEDPKGS=$(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ + build.tools.addtorestrictedpkgs.AddToRestrictedPkgs ########################################################################################## --- ./jdk/make/data/jdwp/jdwp.spec Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/data/jdwp/jdwp.spec Wed Feb 04 12:14:43 2015 -0800 @@ -1147,7 +1147,8 @@ (ErrorSet (Error INVALID_CLASS "clazz is not the ID of a class.") (Error INVALID_OBJECT "clazz is not a known ID.") - (Error INVALID_METHODID "methodID is not the ID of a method.") + (Error INVALID_METHODID "methodID is not the ID of a static method in " + "this class type or one of its superclasses.") (Error INVALID_THREAD) (Error THREAD_NOT_SUSPENDED) (Error VM_DEAD) @@ -1250,6 +1251,83 @@ ) ) (CommandSet InterfaceType=5 + (Command InvokeMethod=1 + "Invokes a static method. " + "The method must not be a static initializer. " + "The method must be a member of the interface type. " + "
<p>Since JDWP version 1.8 " + "<p>" + "The method invocation will occur in the specified thread. " + "Method invocation can occur only if the specified thread " + "has been suspended by an event. " + "Method invocation is not supported " + "when the target VM has been suspended by the front-end. " + "<p>" + "The specified method is invoked with the arguments in the specified " + "argument list. " + "The method invocation is synchronous; the reply packet is not " + "sent until the invoked method returns in the target VM. " + "The return value (possibly the void value) is " + "included in the reply packet. " + "If the invoked method throws an exception, the " + "exception object ID is set in the reply packet; otherwise, the " + "exception object ID is null. " + "<p>" + "For primitive arguments, the argument value's type must match the " + "argument's type exactly. For object arguments, there must exist a " + "widening reference conversion from the argument value's type to the " + "argument's type and the argument's type must be loaded. " + "<p>" + "By default, all threads in the target VM are resumed while " + "the method is being invoked if they were previously " + "suspended by an event or by a command. " + "This is done to prevent the deadlocks " + "that will occur if any of the threads own monitors " + "that will be needed by the invoked method. It is possible that " + "breakpoints or other events might occur during the invocation. " + "Note, however, that this implicit resume acts exactly like " + "the ThreadReference resume command, so if the thread's suspend " + "count is greater than 1, it will remain in a suspended state " + "during the invocation. By default, when the invocation completes, " + "all threads in the target VM are suspended, regardless of their state " + "before the invocation. " + "<p>" + "The resumption of other threads during the invoke can be prevented " + "by specifying the INVOKE_SINGLE_THREADED " + "bit flag in the options field; however, " + "there is no protection against or recovery from the deadlocks " + "described above, so this option should be used with great caution. " + "Only the specified thread will be resumed (as described for all " + "threads above). Upon completion of a single threaded invoke, the invoking thread " + "will be suspended once again. Note that any threads started during " + "the single threaded invocation will not be suspended when the " + "invocation completes. " + "<p>" + "If the target VM is disconnected during the invoke (for example, through " + "the VirtualMachine dispose command) the method invocation continues. " + (Out + (interfaceType clazz "The interface type ID.") + (threadObject thread "The thread in which to invoke.") + (method methodID "The method to invoke.") + (Repeat arguments + (value arg "The argument value.") + ) + (int options "Invocation options") + ) + (Reply + (value returnValue "The returned value.") + (tagged-object exception "The thrown exception.") + ) + (ErrorSet + (Error INVALID_CLASS "clazz is not the ID of an interface.") + (Error INVALID_OBJECT "clazz is not a known ID.") + (Error INVALID_METHODID "methodID is not the ID of a static method in this " + "interface type or is the ID of a static initializer.") + (Error INVALID_THREAD) + (Error THREAD_NOT_SUSPENDED) + (Error VM_DEAD) + ) + ) ) (CommandSet Method=6 (Command LineTable=1 @@ -1543,7 +1621,7 @@ "<p>
" "By default, all threads in the target VM are resumed while " "the method is being invoked if they were previously " - "suspended by an event or by command. " + "suspended by an event or by a command. " "This is done to prevent the deadlocks " "that will occur if any of the threads own monitors " "that will be needed by the invoked method. It is possible that " @@ -1586,7 +1664,9 @@ (Error INVALID_OBJECT) (Error INVALID_CLASS "clazz is not the ID of a reference " "type.") - (Error INVALID_METHODID "methodID is not the ID of a method.") + (Error INVALID_METHODID "methodID is not the ID of an instance method " + "in this object's type or one of its superclasses, " + "superinterfaces, or implemented interfaces.") (Error INVALID_THREAD) (Error THREAD_NOT_SUSPENDED) (Error VM_DEAD) --- ./jdk/make/gensrc/GensrcMisc.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/gensrc/GensrcMisc.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -62,25 +62,6 @@ ########################################################################################## -ifeq ($(OPENJDK_TARGET_OS_API), posix) - UPSUFFIX := $(OPENJDK_TARGET_OS) - ifeq ($(OPENJDK_TARGET_OS), macosx) - UPSUFFIX := bsd - endif - # UNIXProcess.java is different for solaris and linux. We need to copy - # the correct UNIXProcess.java over to $(JDK_OUTPUTDIR)/gensrc/java/lang/. - - $(JDK_OUTPUTDIR)/gensrc/java/lang/UNIXProcess.java: \ - $(JDK_TOPDIR)/src/solaris/classes/java/lang/UNIXProcess.java.$(UPSUFFIX) - $(ECHO) $(LOG_INFO) Copying UNIXProcess.java.$(OPENJDK_TARGET_OS) to java/lang/UNIXProcess.java - $(call install-file) - $(CHMOD) u+rw $@ - - GENSRC_MISC += $(JDK_OUTPUTDIR)/gensrc/java/lang/UNIXProcess.java -endif - -########################################################################################## - GENSRC_MISC += $(JDK_OUTPUTDIR)/gensrc/sun/nio/ch/SocketOptionRegistry.java GENSRC_SOR_SRC := $(JDK_TOPDIR)/src/share/native/sun/nio/ch --- ./jdk/make/lib/Awt2dLibraries.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/lib/Awt2dLibraries.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -318,7 +318,7 @@ endif ifeq ($(OPENJDK_TARGET_OS), macosx) - LIBAWT_FILES += awt_LoadLibrary.c img_colors.c + LIBAWT_FILES += awt_LoadLibrary.c LIBAWT_CFLAGS += -F/System/Library/Frameworks/JavaVM.framework/Frameworks endif @@ -611,7 +611,6 @@ debug_mem.c \ debug_trace.c \ debug_util.c \ - awt_Plugin.c \ gnome_interface.c \ gtk2_interface.c \ swing_GTKEngine.c \ --- ./jdk/make/lib/CoreLibraries.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/lib/CoreLibraries.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -129,9 +129,9 @@ $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/common \ $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/java/util -ifneq ($(OPENJDK_TARGET_OS), macosx) +ifeq ($(OPENJDK_TARGET_OS), windows) LIBJAVA_SRC_DIRS += $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/sun/util/locale/provider -else +else ifeq ($(OPENJDK_TARGET_OS), macosx) LIBJAVA_SRC_DIRS += $(JDK_TOPDIR)/src/macosx/native/sun/util/locale/provider endif --- ./jdk/make/lib/ServiceabilityLibraries.gmk Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/lib/ServiceabilityLibraries.gmk Wed Feb 04 12:14:43 2015 -0800 @@ -23,6 +23,10 @@ # questions. # +# Include custom extensions if available. 
+-include $(CUSTOM_MAKE_DIR)/lib/ServiceabilityLibraries.gmk + + LIBATTACH_EXCLUDE_FILES := ifneq ($(OPENJDK_TARGET_OS), solaris) LIBATTACH_EXCLUDE_FILES += SolarisVirtualMachine.c @@ -283,12 +287,12 @@ ########################################################################################## -BUILD_LIBMANAGEMENT_SRC := $(JDK_TOPDIR)/src/share/native/sun/management \ +BUILD_LIBMANAGEMENT_SRC += $(JDK_TOPDIR)/src/share/native/sun/management \ $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/sun/management BUILD_LIBMANAGEMENT_EXCLUDES := -BUILD_LIBMANAGEMENT_CFLAGS := -I$(JDK_TOPDIR)/src/share/native/sun/management +BUILD_LIBMANAGEMENT_CFLAGS += -I$(JDK_TOPDIR)/src/share/native/sun/management ifneq ($(OPENJDK_TARGET_OS), solaris) BUILD_LIBMANAGEMENT_EXCLUDES += SolarisOperatingSystem.c @@ -309,6 +313,9 @@ endif endif +# Make it possible to override this variable +LIBMANAGEMENT_MAPFILE ?= $(JDK_TOPDIR)/make/mapfiles/libmanagement/mapfile-vers + $(eval $(call SetupNativeCompilation,BUILD_LIBMANAGEMENT, \ LIBRARY := management, \ OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \ @@ -317,7 +324,7 @@ LANG := C, \ OPTIMIZATION := $(LIBMANAGEMENT_OPTIMIZATION), \ CFLAGS := $(CFLAGS_JDKLIB) $(BUILD_LIBMANAGEMENT_CFLAGS), \ - MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libmanagement/mapfile-vers, \ + MAPFILE := $(LIBMANAGEMENT_MAPFILE), \ LDFLAGS := $(LDFLAGS_JDKLIB) \ $(call SET_SHARED_LIBRARY_ORIGIN), \ LDFLAGS_solaris := -lkstat, \ --- ./jdk/make/mapfiles/libawt/mapfile-mawt-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libawt/mapfile-mawt-vers Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -249,11 +249,6 @@ Java_sun_awt_motif_XsessionWMcommand; Java_sun_awt_motif_XsessionWMcommand_New; - # Java Plugin - getAwtLockFunctions; - getAwtData; - getAwtDisplay; - # libfontmanager entry points AWTIsHeadless; AWTCountFonts; --- ./jdk/make/mapfiles/libawt/mapfile-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libawt/mapfile-vers Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -174,13 +174,6 @@ Java_sun_awt_motif_XsessionWMcommand; Java_sun_awt_motif_XsessionWMcommand_New; - # Java Plugin - # This is in awt_LoadLibrary.c and falls through to libmawt. - # Evidently plugin needs this for backward compatability. - getAwtLockFunctions; - getAwtData; - getAwtDisplay; - # libfontmanager entry points AWTIsHeadless; GrPrim_Sg2dGetCompInfo; --- ./jdk/make/mapfiles/libawt/mapfile-vers-linux Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libawt/mapfile-vers-linux Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -152,13 +152,6 @@ # Evidently CDE needs this for backward compatability. 
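The jdwp.spec hunks above add an InterfaceType InvokeMethod command and clarify the INVALID_METHODID errors for the ClassType and ObjectReference invokes. On the debugger side those semantics surface through JDI; a hedged sketch using the long-standing ClassType.invokeMethod, where vm and thread are assumed to come from an established debug connection with the thread suspended by an event, and Example/compute are illustrative names:

    import com.sun.jdi.*;
    import java.util.Collections;

    public class InvokeSketch {
        static Value invokeStatic(VirtualMachine vm, ThreadReference thread)
                throws Exception {
            ClassType type = (ClassType) vm.classesByName("Example").get(0);
            Method method = type.methodsByName("compute").get(0);
            // INVOKE_SINGLE_THREADED is the options bit discussed above: only
            // 'thread' is resumed during the call, so monitors held by other
            // suspended threads can deadlock the invoked method.
            return type.invokeMethod(thread, method,
                    Collections.singletonList(vm.mirrorOf(42)),
                    ObjectReference.INVOKE_SINGLE_THREADED);
        }
    }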
Java_sun_awt_motif_XsessionWMcommand; - # Java Plugin - # This is in awt_LoadLibrary.c and falls through to libmawt. - # Evidently plugin needs this for backward compatability. - getAwtLockFunctions; - getAwtData; - getAwtDisplay; - # libfontmanager entry points AWTIsHeadless; GrPrim_Sg2dGetCompInfo; @@ -283,11 +276,6 @@ # CDE private entry point Java_sun_awt_motif_XsessionWMcommand; - # Java Plugin - getAwtLockFunctions; - getAwtData; - getAwtDisplay; - # libfontmanager entry points AWTIsHeadless; AWTCountFonts; --- ./jdk/make/mapfiles/libawt_xawt/mapfile-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libawt_xawt/mapfile-vers Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -449,12 +449,6 @@ awt_Lock; awt_GetComponent; - # Java Plugin - # This is in awt_LoadLibrary.c and falls through to libmawt. - # Evidently plugin needs this for backward compatability. - getAwtLockFunctions; - getAwtData; - getAwtDisplay; #XAWT entry point for CDE Java_sun_awt_motif_XsessionWMcommand; Java_sun_awt_motif_XsessionWMcommand_New; --- ./jdk/make/mapfiles/libjava/mapfile-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libjava/mapfile-vers Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -78,13 +78,13 @@ Java_java_io_FileInputStream_available; Java_java_io_FileInputStream_close0; Java_java_io_FileInputStream_initIDs; - Java_java_io_FileInputStream_open; + Java_java_io_FileInputStream_open0; Java_java_io_FileInputStream_read0; Java_java_io_FileInputStream_readBytes; Java_java_io_FileInputStream_skip; Java_java_io_FileOutputStream_close0; Java_java_io_FileOutputStream_initIDs; - Java_java_io_FileOutputStream_open; + Java_java_io_FileOutputStream_open0; Java_java_io_FileOutputStream_write; Java_java_io_FileOutputStream_writeBytes; Java_java_io_ObjectInputStream_bytesToDoubles; @@ -97,7 +97,7 @@ Java_java_io_RandomAccessFile_getFilePointer; Java_java_io_RandomAccessFile_initIDs; Java_java_io_RandomAccessFile_length; - Java_java_io_RandomAccessFile_open; + Java_java_io_RandomAccessFile_open0; Java_java_io_RandomAccessFile_read0; Java_java_io_RandomAccessFile_readBytes; Java_java_io_RandomAccessFile_seek0; @@ -269,6 +269,9 @@ Java_sun_reflect_Reflection_getCallerClass__; Java_sun_reflect_Reflection_getCallerClass__I; Java_sun_reflect_Reflection_getClassAccessFlags; + Java_sun_misc_URLClassPath_knownToNotExist0; + Java_sun_misc_URLClassPath_getLookupCacheURLs; + Java_sun_misc_URLClassPath_getLookupCacheForClassLoader; Java_sun_misc_Version_getJdkVersionInfo; Java_sun_misc_Version_getJdkSpecialVersion; Java_sun_misc_Version_getJvmVersionInfo; @@ -287,8 +290,6 @@ # Java_sun_misc_VM_unsuspendSomeThreads; threads.c # Java_sun_misc_VM_unsuspendThreads; threads.c - Java_sun_util_locale_provider_HostLocaleProviderAdapterImpl_getPattern; - # Outcalls from libjvm done using dlsym(). 
VerifyClassCodes; --- ./jdk/make/mapfiles/libjava/reorder-sparc Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libjava/reorder-sparc Wed Feb 04 12:14:43 2015 -0800 @@ -44,7 +44,7 @@ text: .text%Java_java_io_UnixFileSystem_canonicalize; text: .text%JNU_GetStringPlatformChars; text: .text%JNU_ReleaseStringPlatformChars; -text: .text%Java_java_io_FileInputStream_open; +text: .text%Java_java_io_FileInputStream_open0; text: .text%fileOpen; text: .text%Java_java_io_FileInputStream_readBytes; text: .text%readBytes; --- ./jdk/make/mapfiles/libjava/reorder-sparcv9 Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libjava/reorder-sparcv9 Wed Feb 04 12:14:43 2015 -0800 @@ -47,7 +47,7 @@ text: .text%Java_java_io_UnixFileSystem_canonicalize; text: .text%JNU_GetStringPlatformChars; text: .text%JNU_ReleaseStringPlatformChars; -text: .text%Java_java_io_FileInputStream_open; +text: .text%Java_java_io_FileInputStream_open0; text: .text%fileOpen; text: .text%Java_java_io_FileInputStream_readBytes; text: .text%readBytes; --- ./jdk/make/mapfiles/libjava/reorder-x86 Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libjava/reorder-x86 Wed Feb 04 12:14:43 2015 -0800 @@ -57,7 +57,7 @@ text: .text%Java_java_lang_ClassLoader_00024NativeLibrary_find; text: .text%Java_java_lang_Float_floatToIntBits; text: .text%Java_java_lang_Double_doubleToLongBits; -text: .text%Java_java_io_FileInputStream_open; +text: .text%Java_java_io_FileInputStream_open0; text: .text%fileOpen; text: .text%Java_java_io_UnixFileSystem_getLength; text: .text%Java_java_io_FileInputStream_readBytes; @@ -90,7 +90,7 @@ text: .text%JNU_CallMethodByName; text: .text%JNU_CallMethodByNameV; text: .text%Java_java_util_logging_FileHandler_lockFile; -text: .text%Java_java_io_FileOutputStream_open; +text: .text%Java_java_io_FileOutputStream_open0; text: .text%Java_java_io_UnixFileSystem_createDirectory; text: .text%Java_java_io_UnixFileSystem_getLastModifiedTime; text: .text%Java_java_util_prefs_FileSystemPreferences_lockFile0; --- ./jdk/make/mapfiles/libjfr/mapfile-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libjfr/mapfile-vers Wed Feb 04 12:14:43 2015 -0800 @@ -10,8 +10,13 @@ Java_oracle_jrockit_jfr_Process_getpid; Java_oracle_jrockit_jfr_Timing_counterTime; Java_oracle_jrockit_jfr_Timing_init; - Java_oracle_jrockit_jfr_NativeLogger_output0; - Java_oracle_jrockit_jfr_VMJFR_isEnabled; + Java_oracle_jrockit_jfr_Logger_output0; + Java_oracle_jrockit_jfr_JFR_isCommercialFeaturesUnlocked; + Java_oracle_jrockit_jfr_JFR_isStarted; + Java_oracle_jrockit_jfr_JFR_isSupportedInVM; + Java_oracle_jrockit_jfr_JFR_startFlightRecorder; + Java_oracle_jrockit_jfr_JFR_isDisabledOnCommandLine; + Java_oracle_jrockit_jfr_JFR_isEnabled; Java_oracle_jrockit_jfr_VMJFR_options; Java_oracle_jrockit_jfr_VMJFR_init; Java_oracle_jrockit_jfr_VMJFR_addConstPool; @@ -33,7 +38,6 @@ Java_oracle_jrockit_jfr_VMJFR_setPeriod; Java_oracle_jrockit_jfr_VMJFR_getPeriod; Java_oracle_jrockit_jfr_VMJFR_descriptors; - Java_oracle_jrockit_jfr_VMJFR_redefineClass0; Java_oracle_jrockit_jfr_VMJFR_retransformClasses0; JNI_OnLoad; local: --- ./jdk/make/mapfiles/libsplashscreen/mapfile-vers Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/mapfiles/libsplashscreen/mapfile-vers Wed Feb 04 12:14:43 2015 -0800 @@ -35,6 +35,7 @@ Java_java_awt_SplashScreen__1getImageFileName; Java_java_awt_SplashScreen__1getImageJarName; Java_java_awt_SplashScreen__1setImageData; + Java_java_awt_SplashScreen__1getScaleFactor; SplashLoadMemory; SplashLoadFile; --- ./jdk/make/profile-rtjar-includes.txt 
Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/make/profile-rtjar-includes.txt Wed Feb 04 12:14:43 2015 -0800 @@ -117,6 +117,8 @@ com/sun/security/ntlm \ com/sun/security/sasl \ com/sun/tracing \ + jdk/management \ + jdk/internal/cmm \ java/lang/instrument \ java/lang/management \ java/security/acl \ --- ./jdk/src/aix/classes/sun/awt/fontconfigs/aix.fontconfig.properties Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/src/aix/classes/sun/awt/fontconfigs/aix.fontconfig.properties Wed Feb 04 12:14:43 2015 -0800 @@ -24,9 +24,14 @@ # questions. # -# Minimal version for AIX using the standard Latin Type1 Fonts from the -# package X11.fnt.iso_T1. These fonts are installed by default into -# "/usr/lpp/X11/lib/X11/fonts/Type1" and sym-linked to "/usr/lib/X11/fonts/Type1" +# +# Portions Copyright (c) 2014 IBM Corporation +# + +# This file references the standard Latin Type1 fonts from the AIX package +# X11.fnt.iso_T1 and the Unicode TrueType fonts from X11.fnt.ucs.ttf. They +# are located by default under "/usr/lpp/X11/lib/X11/fonts/{Type1,TrueType}" +# and sym-linked to "/usr/lib/X11/fonts/". # Version @@ -34,44 +39,381 @@ # Component Font Mappings -dialog.plain.latin-1=-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso10646-1 -dialog.bold.latin-1=-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso10646-1 -dialog.italic.latin-1=-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso10646-1 -dialog.bolditalic.latin-1=-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso10646-1 +allfonts.iso10646-extB=-monotype-sansmonowtextb-medium-r-normal--*-%d-75-75-m-*-unicode-2 -dialoginput.plain.latin-1=-*-courier-medium-r-normal--*-%d-100-100-m-*-iso10646-1 -dialoginput.bold.latin-1=-*-courier-bold-r-normal--*-%d-100-100-m-*-iso10646-1 -dialoginput.italic.latin-1=-*-courier-medium-o-normal--*-%d-100-100-m-*-iso10646-1 -dialoginput.bolditalic.latin-1=-*-courier-bold-o-normal--*-%d-100-100-m-*-iso10646-1 -sansserif.plain.latin-1=-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso10646-1 -sansserif.bold.latin-1=-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso10646-1 -sansserif.italic.latin-1=-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso10646-1 -sansserif.bolditalic.latin-1=-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso10646-1 +dialog.plain.latin-1=-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso8859-1 +dialog.plain.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +dialog.plain.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +dialog.plain.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialog.plain.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialog.plain.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialog.plain.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialog.plain.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialog.plain.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialog.plain.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialog.plain.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialog.plain.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 -serif.plain.latin-1=-*-times new roman-medium-r-normal--*-%d-100-100-p-*-iso10646-1 -serif.bold.latin-1=-*-times new roman-bold-r-normal--*-%d-100-100-p-*-iso10646-1 -serif.italic.latin-1=-*-times new roman-medium-i-normal--*-%d-100-100-p-*-iso10646-1 
-serif.bolditalic.latin-1=-*-times new roman-bold-i-normal--*-%d-100-100-p-*-iso10646-1 +dialog.bold.latin-1=-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso8859-1 +dialog.bold.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +dialog.bold.ukranian-ibm1124=-*-*-bold-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +dialog.bold.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialog.bold.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialog.bold.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialog.bold.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialog.bold.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialog.bold.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialog.bold.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialog.bold.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialog.bold.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 -monospaced.plain.latin-1=-*-courier-medium-r-normal--*-%d-100-100-m-*-iso10646-1 -monospaced.bold.latin-1=-*-courier-bold-r-normal--*-%d-100-100-m-*-iso10646-1 -monospaced.italic.latin-1=-*-courier-medium-o-normal--*-%d-100-100-m-*-iso10646-1 -monospaced.bolditalic.latin-1=-*-courier-bold-o-normal--*-%d-100-100-m-*-iso10646-1 +dialog.italic.latin-1=-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso8859-1 +dialog.italic.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +dialog.italic.ukranian-ibm1124=-*-*-medium-i-normal--*-%d-75-75-p-*-ucs2.i18n-0 +dialog.italic.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialog.italic.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialog.italic.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialog.italic.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialog.italic.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialog.italic.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialog.italic.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialog.italic.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialog.italic.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +dialog.bolditalic.latin-1=-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso8859-1 +dialog.bolditalic.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +dialog.bolditalic.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +dialog.bolditalic.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialog.bolditalic.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialog.bolditalic.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialog.bolditalic.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialog.bolditalic.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialog.bolditalic.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialog.bolditalic.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 
+dialog.bolditalic.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialog.bolditalic.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +dialoginput.plain.latin-1=-*-courier-medium-r-normal--*-%d-100-100-m-*-iso8859-1 +dialoginput.plain.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +dialoginput.plain.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +dialoginput.plain.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialoginput.plain.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialoginput.plain.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialoginput.plain.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialoginput.plain.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialoginput.plain.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialoginput.plain.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialoginput.plain.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialoginput.plain.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +dialoginput.bold.latin-1=-*-courier-bold-r-normal--*-%d-100-100-m-*-iso8859-1 +dialoginput.bold.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +dialoginput.bold.ukranian-ibm1124=-*-*-bold-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +dialoginput.bold.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialoginput.bold.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialoginput.bold.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialoginput.bold.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialoginput.bold.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialoginput.bold.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialoginput.bold.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialoginput.bold.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialoginput.bold.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +dialoginput.italic.latin-1=-*-courier-medium-o-normal--*-%d-100-100-m-*-iso8859-1 +dialoginput.italic.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +dialoginput.italic.ukranian-ibm1124=-*-*-medium-i-normal--*-%d-75-75-m-*-ucs2.i18n-0 +dialoginput.italic.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialoginput.italic.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialoginput.italic.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialoginput.italic.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialoginput.italic.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialoginput.italic.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialoginput.italic.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 
+dialoginput.italic.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialoginput.italic.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +dialoginput.bolditalic.latin-1=-*-courier-bold-o-normal--*-%d-100-100-m-*-iso8859-1 +dialoginput.bolditalic.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +dialoginput.bolditalic.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +dialoginput.bolditalic.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +dialoginput.bolditalic.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +dialoginput.bolditalic.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +dialoginput.bolditalic.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +dialoginput.bolditalic.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +dialoginput.bolditalic.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +dialoginput.bolditalic.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +dialoginput.bolditalic.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +dialoginput.bolditalic.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +sansserif.plain.latin-1=-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso8859-1 +sansserif.plain.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +sansserif.plain.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +sansserif.plain.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +sansserif.plain.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +sansserif.plain.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +sansserif.plain.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +sansserif.plain.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +sansserif.plain.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +sansserif.plain.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +sansserif.plain.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +sansserif.plain.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +sansserif.bold.latin-1=-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso8859-1 +sansserif.bold.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +sansserif.bold.ukranian-ibm1124=-*-*-bold-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +sansserif.bold.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +sansserif.bold.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +sansserif.bold.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +sansserif.bold.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +sansserif.bold.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +sansserif.bold.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +sansserif.bold.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +sansserif.bold.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 
+sansserif.bold.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +sansserif.italic.latin-1=-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso8859-1 +sansserif.italic.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +sansserif.italic.ukranian-ibm1124=-*-*-medium-i-normal--*-%d-75-75-p-*-ucs2.i18n-0 +sansserif.italic.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +sansserif.italic.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +sansserif.italic.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +sansserif.italic.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +sansserif.italic.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +sansserif.italic.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +sansserif.italic.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +sansserif.italic.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +sansserif.italic.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +sansserif.bolditalic.latin-1=-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso8859-1 +sansserif.bolditalic.thai=-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +sansserif.bolditalic.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +sansserif.bolditalic.japanese-x0208=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +sansserif.bolditalic.japanese-x0201=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +sansserif.bolditalic.japanese-udc=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +sansserif.bolditalic.japanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +sansserif.bolditalic.korean=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +sansserif.bolditalic.korean-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +sansserif.bolditalic.chinese=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +sansserif.bolditalic.chinese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +sansserif.bolditalic.taiwanese-iso10646=-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +serif.plain.latin-1=-*-times new roman-medium-r-normal--*-%d-100-100-p-*-iso8859-1 +serif.plain.thai=-ibm-thaitimes-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +serif.plain.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +serif.plain.japanese-x0208=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +serif.plain.japanese-x0201=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +serif.plain.japanese-udc=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +serif.plain.japanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +serif.plain.korean=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +serif.plain.korean-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +serif.plain.chinese=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +serif.plain.chinese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +serif.plain.taiwanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + 
+serif.bold.latin-1=-*-times new roman-bold-r-normal--*-%d-100-100-p-*-iso8859-1 +serif.bold.thai=-ibm-thaitimes-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +serif.bold.ukranian-ibm1124=-*-*-bold-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +serif.bold.japanese-x0208=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +serif.bold.japanese-x0201=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +serif.bold.japanese-udc=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +serif.bold.japanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +serif.bold.korean=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +serif.bold.korean-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +serif.bold.chinese=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +serif.bold.chinese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +serif.bold.taiwanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + + +serif.italic.latin-1=-*-times new roman-medium-i-normal--*-%d-100-100-p-*-iso8859-1 +serif.italic.thai=-ibm-thaitimes-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +serif.italic.ukranian-ibm1124=-*-*-medium-i-normal--*-%d-75-75-p-*-ucs2.i18n-0 +serif.italic.japanese-x0208=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +serif.italic.japanese-x0201=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +serif.italic.japanese-udc=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +serif.italic.japanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +serif.italic.korean=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +serif.italic.korean-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +serif.italic.chinese=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +serif.italic.chinese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +serif.italic.taiwanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +serif.bolditalic.latin-1=-*-times new roman-bold-i-normal--*-%d-100-100-p-*-iso8859-1 +serif.bolditalic.thai=-ibm-thaitimes-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0 +serif.bolditalic.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0 +serif.bolditalic.japanese-x0208=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +serif.bolditalic.japanese-x0201=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +serif.bolditalic.japanese-udc=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +serif.bolditalic.japanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +serif.bolditalic.korean=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +serif.bolditalic.korean-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +serif.bolditalic.chinese=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +serif.bolditalic.chinese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +serif.bolditalic.taiwanese-iso10646=-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + 
+monospaced.plain.latin-1=-*-courier-medium-r-normal--*-%d-100-100-m-*-iso8859-1 +monospaced.plain.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +monospaced.plain.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +monospaced.plain.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +monospaced.plain.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +monospaced.plain.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +monospaced.plain.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +monospaced.plain.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +monospaced.plain.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +monospaced.plain.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +monospaced.plain.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +monospaced.plain.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +monospaced.bold.latin-1=-*-courier-bold-r-normal--*-%d-100-100-m-*-iso8859-1 +monospaced.bold.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +monospaced.bold.ukranian-ibm1124=-*-*-bold-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +monospaced.bold.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +monospaced.bold.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +monospaced.bold.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +monospaced.bold.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +monospaced.bold.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +monospaced.bold.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +monospaced.bold.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +monospaced.bold.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +monospaced.bold.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +monospaced.italic.latin-1=-*-courier-medium-o-normal--*-%d-100-100-m-*-iso8859-1 +monospaced.italic.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +monospaced.italic.ukranian-ibm1124=-*-*-medium-i-normal--*-%d-75-75-m-*-ucs2.i18n-0 +monospaced.italic.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +monospaced.italic.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +monospaced.italic.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +monospaced.italic.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +monospaced.italic.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +monospaced.italic.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +monospaced.italic.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +monospaced.italic.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +monospaced.italic.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 + +monospaced.bolditalic.latin-1=-*-courier-bold-o-normal--*-%d-100-100-m-*-iso8859-1 
+monospaced.bolditalic.thai=-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0 +monospaced.bolditalic.ukranian-ibm1124=-*-*-medium-r-normal--*-%d-75-75-m-*-ucs2.i18n-0 +monospaced.bolditalic.japanese-x0208=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0 +monospaced.bolditalic.japanese-x0201=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0 +monospaced.bolditalic.japanese-udc=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp +monospaced.bolditalic.japanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0 +monospaced.bolditalic.korean=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0 +monospaced.bolditalic.korean-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0 +monospaced.bolditalic.chinese=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0 +monospaced.bolditalic.chinese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0 +monospaced.bolditalic.taiwanese-iso10646=-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0 # Search Sequences sequence.allfonts=latin-1 +sequence.allfonts.UTF-8=latin-1,japanese-iso10646 +# Uk_UA +sequence.allfonts.x-IBM1124=latin-1,ukranian-ibm1124 +# Japanese +sequence.allfonts.x-IBM943C=latin-1,japanese-x0201,japanese-x0208,japanese-udc +sequence.allfonts.x-IBM29626C=latin-1,japanese-x0201,japanese-x0208,japanese-udc +sequence.allfonts.UTF-8.ja=japanese-iso10646,latin-1,iso10646-extB +# Chinese +sequence.allfonts.x-EUC_CN=latin-1,chinese +sequence.allfonts.GB18030=latin-1,chinese-iso10646,iso10646-extB +sequence.allfonts.UTF-8.zh.CN=latin-1,chinese-iso10646,iso10646-extB +# Taiwanese +sequence.allfonts.x-IBM964=latin-1,taiwanese-iso10646 +sequence.allfonts.Big5=latin-1,taiwanese-iso10646 +sequence.allfonts.UTF-8.zh.TW=latin-1,taiwanese-iso10646 +# Korean +sequence.allfonts.x-IBM970=latin-1,korean +sequence.allfonts.UTF-8.ko=latin-1,korean-iso10646 +# Thai +sequence.allfonts.TIS-620=latin-1,thai +sequence.allfonts.UTF-8.th=latin-1,thai +# fallback +sequence.fallback=thai,chinese-iso10646,taiwanese-iso10646,japanese-iso10646,korean-iso10646,iso10646-extB -filename.-*-courier-medium-r-normal--*-%d-100-100-m-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/cour.pfa -filename.-*-courier-bold-r-normal--*-%d-100-100-m-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/courb.pfa -filename.-*-courier-medium-o-normal--*-%d-100-100-m-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/couri.pfa -filename.-*-courier-bold-o-normal--*-%d-100-100-m-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/courbi.pfa -filename.-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/helv.pfa -filename.-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvb.pfa -filename.-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvi.pfa -filename.-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvbi.pfa -filename.-*-times_new_roman-medium-r-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnr.pfa -filename.-*-times_new_roman-bold-r-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnrb.pfa -filename.-*-times_new_roman-medium-i-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnri.pfa -filename.-*-times_new_roman-bold-i-normal--*-%d-100-100-p-*-iso10646-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnrbi.pfa +# 
Exclusion Ranges +exclusion.japanese-iso10646=0000-00ff + +# Font File Names +filename.-*-courier-medium-r-normal--*-%d-100-100-m-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/cour.pfa +filename.-*-courier-bold-r-normal--*-%d-100-100-m-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/courb.pfa +filename.-*-courier-medium-o-normal--*-%d-100-100-m-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/couri.pfa +filename.-*-courier-bold-o-normal--*-%d-100-100-m-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/courbi.pfa +filename.-*-helvetica-medium-r-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/helv.pfa +filename.-*-helvetica-bold-r-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvb.pfa +filename.-*-helvetica-medium-o-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvi.pfa +filename.-*-helvetica-bold-o-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/helvbi.pfa +filename.-*-times_new_roman-medium-r-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnr.pfa +filename.-*-times_new_roman-bold-r-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnrb.pfa +filename.-*-times_new_roman-medium-i-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnri.pfa +filename.-*-times_new_roman-bold-i-normal--*-%d-100-100-p-*-iso8859-1=/usr/lpp/X11/lib/X11/fonts/Type1/tnrbi.pfa + + +filename.-monotype-sansmonowtextb-medium-r-normal--*-%d-75-75-m-*-unicode-2=/usr/lpp/X11/lib/X11/fonts/TrueType/MTSanXBA.ttf + +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_k.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_k.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_s.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_s.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_s.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_s.ttf +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_t.ttf + +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf 
+filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdk.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdk.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansds.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansds.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdt.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0208.1983-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_j.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-jisx0201.1976-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_j.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ibm-udcjp=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_j.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_japan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_j.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ksc5601.1987-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_k.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_korea-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_k.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-gb2312.1980-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_s.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_china-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_s.ttf +filename.-monotype-sanswt-medium-r-normal--*-%d-75-75-*-*-ucs2.cjk_taiwan-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsans_t.ttf + +filename.-monotype-timesnewromanwt-medium-r-normal--*-%d-75-75-*-*-iso8859-15=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-monotype-sansmonowt-medium-r-normal--*-%d-75-75-*-*-iso8859-15=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf + +filename.-ibm-thaicourier-medium-r-normal--*-%d-75-75-m-*-ucs2.thai-0=/usr/lpp/X11/lib/X11/fonts/TrueType/courth.ttf +filename.-ibm-thaihelvetica-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0=/usr/lpp/X11/lib/X11/fonts/TrueType/helvth.ttf +filename.-ibm-thaitimes-medium-r-normal--*-%d-75-75-p-*-ucs2.thai-0=/usr/lpp/X11/lib/X11/fonts/TrueType/timeth.ttf + +filename.-*-*-medium-r-normal--*-%d-*-*-p-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-bold-r-normal--*-%d-*-*-p-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-medium-i-normal--*-%d-*-*-p-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-bold-i-normal--*-%d-*-*-p-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-medium-r-normal--*-%d-*-*-m-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-*-*-bold-r-normal--*-%d-*-*-m-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-*-*-medium-i-normal--*-%d-*-*-m-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-*-*-bold-i-normal--*-%d-*-*-m-*-ibm-1046=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf + +filename.-*-*-medium-r-normal--*-%d-75-75-p-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-bold-r-normal--*-%d-75-75-p-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf +filename.-*-*-medium-i-normal--*-%d-75-75-p-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/tnrwt_j.ttf 
+filename.-*-*-medium-r-normal--*-%d-75-75-m-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-*-*-bold-r-normal--*-%d-75-75-m-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf +filename.-*-*-medium-i-normal--*-%d-75-75-m-*-ucs2.i18n-0=/usr/lpp/X11/lib/X11/fonts/TrueType/mtsansdj.ttf + +# AWT font path +awtfontpath.japanese-x0201=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.japanese-x0208=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.japanese-udc=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.japanese-iso10646=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.korean=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.korean-iso10646=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.chinese=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.chinese-iso10646=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.taiwanese-iso10646=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.thai=/usr/lpp/X11/lib/X11/fonts/TrueType +awtfontpath.iso10646-extB=/usr/lpp/X11/lib/X11/fonts/TrueType + --- ./jdk/src/bsd/doc/man/java.1 Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/src/bsd/doc/man/java.1 Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ '\" t -.\" Copyright (c) 1994, 2014, Oracle and/or its affiliates. All rights reserved. +.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved. .\" .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. .\" @@ -23,14 +23,15 @@ .\" .\" Title: java .\" Language: English -.\" Date: 08 August 2014 +.\" Date: 03 March 2015 .\" SectDesc: Basic Tools .\" Software: JDK 8 .\" Arch: generic -.\" Part Number: E38207-03 +.\" Part Number: E38207-04 +.\" Doc ID: JSSON .\" .if n .pl 99999 -.TH "java" "1" "08 August 2014" "JDK 8" "Basic Tools" +.TH "java" "1" "03 March 2015" "JDK 8" "Basic Tools" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -333,7 +334,6 @@ Selects the Java HotSpot Client VM\&. The 64\-bit version of the Java SE Development Kit (JDK) currently ignores this option and instead uses the Server JVM\&. .sp For default JVM selection, see Server\-Class Machine Detection at - http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/server\-class\&.html .RE .PP @@ -550,7 +550,6 @@ Selects the Java HotSpot Server VM\&. The 64\-bit version of the JDK supports only the Server VM, so in that case the option is implicit\&. .sp For default JVM selection, see Server\-Class Machine Detection at - http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/server\-class\&.html .RE .PP @@ -772,13 +771,14 @@ \fBg\fR or \fBG\fR -to indicate gigabytes\&. By default, the value is set to 48 MB: +to indicate gigabytes\&. The default maximum code cache size is 240 MB; if you disable tiered compilation with the option +\fB\-XX:\-TieredCompilation\fR, then the default size is 48 MB: .sp .if n \{\ .RS 4 .\} .nf -\fB\-Xmaxjitcodesize=48m\fR +\fB\-Xmaxjitcodesize=240m\fR .fi .if n \{\ @@ -889,7 +889,9 @@ \fB\-Xms\fR and \fB\-Xmx\fR -are often set to the same value\&. For more information, see Garbage Collector Ergonomics at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gc\-ergonomics\&.html +are often set to the same value\&. See the section "Ergonomics" in +\fIJava SE HotSpot Virtual Machine Garbage Collection Tuning Guide\fR +at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gctuning/index\&.html\&. 
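For instance, a minimal sketch of that practice (the 512 MB figure and the application class name MyApp are placeholders, not values taken from this guide):

    java -Xms512m -Xmx512m MyApp

Setting the initial and maximum heap to the same value keeps the JVM from growing or shrinking the heap at run time.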
.sp The following examples show how to set the maximum allowed size of allocated memory to 80 MB using various units: .sp @@ -986,28 +988,30 @@ .PP \-Xshare:\fImode\fR .RS 4 -Sets the class data sharing mode\&. Possible +Sets the class data sharing (CDS) mode\&. Possible \fImode\fR arguments for this option include the following: .PP auto .RS 4 -Use shared class data if possible\&. This is the default value for Java HotSpot 32\-Bit Client VM\&. +Use CDS if possible\&. This is the default value for Java HotSpot 32\-Bit Client VM\&. .RE .PP on .RS 4 -Require the use of class data sharing\&. Print an error message and exit if class data sharing cannot be used\&. +Require the use of CDS\&. Print an error message and exit if class data sharing cannot be used\&. .RE .PP off .RS 4 -Do not use shared class data\&. This is the default value for Java HotSpot 32\-Bit Server VM, Java HotSpot 64\-Bit Client VM, and Java HotSpot 64\-Bit Server VM\&. +Do not use CDS\&. This is the default value for Java HotSpot 32\-Bit Server VM, Java HotSpot 64\-Bit Client VM, and Java HotSpot 64\-Bit Server VM\&. .RE .PP dump .RS 4 -Manually generate the class data sharing archive\&. +Manually generate the CDS archive\&. Specify the application class path as described in "Setting the Class Path "\&. +.sp +You should regenerate the CDS archive with each new JDK release\&. .RE .RE .PP @@ -1120,17 +1124,6 @@ Oracle Solaris/x64 (64\-bit): 1024 KB .RE .sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -Windows: depends on virtual memory -.RE -.sp The following examples set the thread stack size to 1024 KB in different units: .sp .if n \{\ @@ -1172,7 +1165,7 @@ .PP remote .RS 4 -Verify only those classes that are loaded remotely over the network\&. This is the default behavior if you do not specify the +Verify those classes that are not loaded by the bootstrap class loader\&. This is the default behavior if you do not specify the \fB\-Xverify\fR option\&. .RE @@ -1186,6 +1179,65 @@ .PP These options control the runtime behavior of the Java HotSpot VM\&. .PP +\-XX:+CheckEndorsedAndExtDirs +.RS 4 +Enables the option to prevent the +\fBjava\fR +command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +The +\fBjava\&.ext\&.dirs\fR +or +\fBjava\&.endorsed\&.dirs\fR +system property is set\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +The +\fBlib/endorsed\fR +directory exists and is not empty\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +The +\fBlib/ext\fR +directory contains any JAR files other than those of the JDK\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +The system\-wide platform\-specific extension directory contains any JAR files\&. +.RE +.RE +.PP \-XX:+DisableAttachMechanism .RS 4 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as @@ -1238,7 +1290,7 @@ .PP \-XX:LargePageSizeInBytes=\fIsize\fR .RS 4 -Sets the maximum size (in bytes) for large pages used for Java heap\&. The +On Solaris, sets the maximum size (in bytes) for large pages used for Java heap\&. 
The \fIsize\fR argument must be a power of 2 (2, 4, 8, 16, \&.\&.\&.)\&. Append the letter \fBk\fR @@ -1327,6 +1379,17 @@ .RE .RE .PP +\-XX:ObjectAlignmentInBytes=\fIalignment\fR +.RS 4 +Sets the memory alignment of Java objects (in bytes)\&. By default, the value is set to 8 bytes\&. The specified value should be a power of two, and must be within the range of 8 and 256 (inclusive)\&. This option makes it possible to use compressed pointers with large Java heap sizes\&. +.sp +The heap size limit in bytes is calculated as: +.sp +\fB4GB * ObjectAlignmentInBytes\fR +.sp +Note: As the alignment value increases, the unused space between objects will also increase\&. As a result, you may not realize any benefits from using compressed pointers with large Java heap sizes\&. +.RE +.PP \-XX:OnError=\fIstring\fR .RS 4 Sets a custom command or a series of semicolon\-separated commands to run when an irrecoverable error occurs\&. If the string contains spaces, then it must be enclosed in quotation marks\&. @@ -1360,6 +1423,28 @@ option\&. .RE .PP +\-XX:+PerfDataSaveToFile +.RS 4 +If enabled, saves +jstat(1) binary data when the Java application exits\&. This binary data is saved in a file named +\fBhsperfdata_\fR\fI<pid>\fR, where +\fI<pid>\fR +is the process identifier of the Java application you ran\&. Use +\fBjstat\fR +to display the performance data contained in this file as follows: +.sp +.if n \{\ +.RS 4 +.\} +.nf +\fBjstat \-class file:///\fR\fB\fI<directory>\fR\fR\fB/hsperfdata_\fR\fB\fI<pid>\fR\fR +\fBjstat \-gc file:///\fR\fB\fI<directory>\fR\fR\fB/hsperfdata_\fR\fB\fI<pid>\fR\fR +.fi +.if n \{\ +.RE +.\} +.RE +.PP \-XX:+PrintCommandLineFlags .RS 4 Enables printing of ergonomically selected JVM flags that appeared on the command line\&. It can be useful to know the ergonomic values set by the JVM, such as the heap space size and the selected garbage collector\&. By default, this option is disabled and flags are not printed\&. @@ -1463,17 +1548,6 @@ Oracle Solaris/x64 (64\-bit): 1024 KB .RE .sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -Windows: depends on virtual memory -.RE -.sp The following examples show how to set the thread stack size to 1024 KB in different units: .sp .if n \{\ @@ -1537,13 +1611,27 @@ \-XX:\-UseCompressedOops .RS 4 Disables the use of compressed pointers\&. By default, this option is enabled, and compressed pointers are used when Java heap sizes are less than 32 GB\&. When this option is enabled, object references are represented as 32\-bit offsets instead of 64\-bit pointers, which typically increases performance when running the application with Java heap sizes less than 32 GB\&. This option works only for 64\-bit JVMs\&. +.sp +It is also possible to use compressed pointers when Java heap sizes are greater than 32 GB\&. See the +\fB\-XX:ObjectAlignmentInBytes\fR +option\&. .RE .PP -\-XX:\-UseLargePages -.RS 4 -Disables the use of large page memory\&. This option is enabled by default\&. +\-XX:+UseHugeTLBFS +.RS 4 +This option for Linux is the equivalent of specifying +\fB\-XX:+UseLargePages\fR\&. This option is disabled by default\&. This option pre\-allocates all large pages up\-front, when memory is reserved; consequently the JVM cannot dynamically grow or shrink large page memory areas; see +\fB\-XX:+UseTransparentHugePages\fR +if you want this behavior\&. .sp -For more information, see Java Support for Large Memory Pages at http://www\&.oracle\&.com/technetwork/java/javase/tech/largememory\-jsp\-137182\&.html +For more information, see "Large Pages"\&.
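A minimal sketch of the pre-allocated mode just described (the heap size and the application class name MyApp are placeholders, and huge pages must already be reserved in the kernel as described under "Large Pages"):

    java -XX:+UseHugeTLBFS -Xms2g -Xmx2g MyApp

If the up-front reservation cannot be satisfied at startup, the JVM typically prints a warning and falls back to regular pages.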
+.RE +.PP +\-XX:+UseLargePages +.RS 4 +Enables the use of large page memory\&. By default, this option is disabled and large page memory is not used\&. +.sp +For more information, see "Large Pages"\&. .RE .PP \-XX:+UseMembar @@ -1563,6 +1651,13 @@ \fB\-XX:\-UsePerfData\fR\&. .RE .PP +\-XX:+UseTransparentHugePages +.RS 4 +On Linux, enables the use of large pages that can dynamically grow or shrink\&. This option is disabled by default\&. You may encounter performance problems with transparent huge pages as the OS moves other pages around to create huge pages; this option is made available for experimentation\&. +.sp +For more information, see "Large Pages"\&. +.RE +.PP \-XX:+AllowUserSignalHandlers .RS 4 Enables installation of signal handlers by the application\&. By default, this option is disabled and the application is not allowed to install signal handlers\&. @@ -2041,7 +2136,8 @@ .PP \-XX:CompileThreshold=\fIinvocations\fR .RS 4 -Sets the number of interpreted method invocations before compilation\&. By default, in the server JVM, the JIT compiler performs 10,000 interpreted method invocations to gather information for efficient compilation\&. For the client JVM, the default setting is 1,500 invocations\&. The following example shows how to set the number of interpreted method invocations to 5,000: +Sets the number of interpreted method invocations before compilation\&. By default, in the server JVM, the JIT compiler performs 10,000 interpreted method invocations to gather information for efficient compilation\&. For the client JVM, the default setting is 1,500 invocations\&. This option is ignored when tiered compilation is enabled; see the option +\fB\-XX:+TieredCompilation\fR\&. The following example shows how to set the number of interpreted method invocations to 5,000: .sp .if n \{\ .RS 4 @@ -2078,7 +2174,7 @@ \fBg\fR or \fBG\fR -to indicate gigabytes\&. The default value is set to 500 KB\&. The following example shows how to set the initial code cache size to 32 KB: +to indicate gigabytes\&. The default value is set to 500 KB\&. The initial code cache size should not be less than the system\*(Aqs minimal memory page size\&. The following example shows how to set the initial code cache size to 32 KB: .sp .if n \{\ .RS 4 @@ -2274,13 +2370,29 @@ \fBg\fR or \fBG\fR -to indicate gigabytes\&. This option is equivalent to +to indicate gigabytes\&. The default maximum code cache size is 240 MB; if you disable tiered compilation with the option +\fB\-XX:\-TieredCompilation\fR, then the default size is 48 MB\&. This option has a limit of 2 GB; a larger value generates an error\&. The maximum code cache size should not be less than the initial code cache size; see the option +\fB\-XX:InitialCodeCacheSize\fR\&. This option is equivalent to \fB\-Xmaxjitcodesize\fR\&. .RE .PP -\-XX:+TieredCompilation -.RS 4 -Enables the use of tiered compilation\&. By default, this option is enabled\&. Only the Java HotSpot Server VM supports this option\&. +\-XX:RTMAbortRatio=\fIabort_ratio\fR +.RS 4 +The RTM abort ratio is specified as a percentage (%) of all executed RTM transactions\&. If the number of aborted transactions becomes greater than this ratio, then the compiled code will be deoptimized\&. This ratio is used when the +\fB\-XX:+UseRTMDeopt\fR +option is enabled\&. The default value of this option is 50\&. This means that the compiled code will be deoptimized if 50% of all transactions are aborted\&.
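A sketch combining the RTM flags above (the ratio value and the application class name MyApp are placeholders; as these descriptions note, RTM requires the Java HotSpot Server VM on an x86 CPU with TSX support, and some builds may additionally require -XX:+UnlockExperimentalVMOptions):

    java -XX:+UseRTMLocking -XX:+UseRTMDeopt -XX:RTMAbortRatio=25 MyApp

With these values, compiled RTM code is deoptimized once 25% of its transactions abort, rather than the default 50%.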
+.RE +.PP +\-XX:RTMRetryCount=\fInumber_of_retries\fR +.RS 4 +When RTM locking code is aborted or busy, it is retried the number of times specified by this option before falling back to the normal locking mechanism\&. The default value for this option is 5\&. The +\fB\-XX:+UseRTMLocking\fR +option must be enabled\&. +.RE +.PP +\-XX:\-TieredCompilation +.RS 4 +Disables the use of tiered compilation\&. By default, this option is enabled\&. Only the Java HotSpot Server VM supports this option\&. .RE .PP \-XX:+UseAES @@ -2319,45 +2431,20 @@ Enables checking of whether the card is already marked before updating the card table\&. This option is disabled by default and should only be used on machines with multiple sockets, where it will increase performance of Java applications that rely heavily on concurrent operations\&. Only the Java HotSpot Server VM supports this option\&. .RE .PP -\-XX:+UseSuperWord -.RS 4 -Enables the transformation of scalar operations into superword operations\&. This option is enabled by default\&. To disable the transformation of scalar operations into superword operations, specify -\fB\-XX:\-UseSuperWord\fR\&. Only the Java HotSpot Server VM supports this option\&. -.RE -.SS "Experimental JIT Compiler Options" -.PP -The options related to the Restricted Transactional Memory (RTM) locking feature in this section are experimental and are not officially supported in Java SE 8u20; you must enable the -\fB\-XX:+UnlockExperimentalVMOptions\fR -option to use them\&. These options are only available for the Java HotSpot Server VM on x86 CPUs that support Transactional Synchronization Extensions (TSX)\&. -.PP -\-XX:RTMAbortRatio=\fIabort_ratio\fR -.RS 4 -The RTM abort ratio is specified as a percentage (%) of all executed RTM transactions\&. If a number of aborted transactions becomes greater than this ratio, then the compiled code will be deoptimized\&. This ratio is used when the -\fB\-XX:+UseRTMDeopt\fR -option is enabled\&. The default value of this option is 50\&. This means that the compiled code will be deoptimized if 50% of all transactions are aborted\&. -.RE -.PP -\-XX:RTMRetryCount=\fInumber_of_retries\fR -.RS 4 -RTM locking code will be retried, when it is aborted or busy, the number of times specified by this option before falling back to the normal locking mechanism\&. The default value for this option is 5\&. The -\fB\-XX:UseRTMLocking\fR -option must be enabled\&. -.RE -.PP \-XX:+UseRTMDeopt .RS 4 Auto\-tunes RTM locking depending on the abort ratio\&. This ratio is specified by \fB\-XX:RTMAbortRatio\fR option\&. If the number of aborted transactions exceeds the abort ratio, then the method containing the lock will be deoptimized and recompiled with all locks as normal locks\&. This option is disabled by default\&. The -\fB\-XX:UseRTMLocking\fR +\fB\-XX:+UseRTMLocking\fR option must be enabled\&. .RE .PP \-XX:+UseRTMLocking .RS 4 -Generate Restricted Transactional Memory (RTM) locking code for all inflated locks, with the normal locking mechanism as the fallback handler\&. This option is disabled by default\&. +Generate Restricted Transactional Memory (RTM) locking code for all inflated locks, with the normal locking mechanism as the fallback handler\&. This option is disabled by default\&. Options related to RTM are only available for the Java HotSpot Server VM on x86 CPUs that support Transactional Synchronization Extensions (TSX)\&.
.sp -RTM is part of Intel\*(Aqs Transactional Synchronization Extensions (TSX), which is an x86 instruction set extension and facilitates the creation of multithreaded applications\&. RTM introduces the new instructions +RTM is part of Intel\*(Aqs TSX, which is an x86 instruction set extension and facilitates the creation of multithreaded applications\&. RTM introduces the new instructions \fBXBEGIN\fR, \fBXABORT\fR, \fBXEND\fR, and @@ -2377,6 +2464,52 @@ .sp RTM improves performance for highly contended locks with low conflict in a critical region (which is code that must not be accessed by more than one thread concurrently)\&. RTM also improves the performance of coarse\-grain locking, which typically does not perform well in multithreaded applications\&. (Coarse\-grain locking is the strategy of holding locks for long periods to minimize the overhead of taking and releasing locks, while fine\-grained locking is the strategy of trying to achieve maximum parallelism by locking only when necessary and unlocking as soon as possible\&.) Also, for lightly contended locks that are used by different threads, RTM can reduce false cache line sharing, also known as cache line ping\-pong\&. This occurs when multiple threads from different processors are accessing different resources, but the resources share the same cache line\&. As a result, the processors repeatedly invalidate the cache lines of other processors, which forces them to read from main memory instead of their cache\&. .RE +.PP +\-XX:+UseSHA +.RS 4 +Enables hardware\-based intrinsics for SHA crypto hash functions for SPARC hardware\&. +\fBUseSHA\fR +is used in conjunction with the +\fBUseSHA1Intrinsics\fR, +\fBUseSHA256Intrinsics\fR, and +\fBUseSHA512Intrinsics\fR +options\&. +.sp +The +\fBUseSHA\fR +and +\fBUseSHA*Intrinsics\fR +flags are enabled by default, and are supported only for Java HotSpot Server VM 64\-bit on SPARC T4 and newer\&. +.sp +This feature is only applicable when using the +\fBsun\&.security\&.provider\&.Sun\fR +provider for SHA operations\&. +.sp +To disable all hardware\-based SHA intrinsics, specify +\fB\-XX:\-UseSHA\fR\&. To disable only a particular SHA intrinsic, use the appropriate corresponding option\&. For example: +\fB\-XX:\-UseSHA256Intrinsics\fR\&. +.RE +.PP +\-XX:+UseSHA1Intrinsics +.RS 4 +Enables intrinsics for SHA\-1 crypto hash function\&. +.RE +.PP +\-XX:+UseSHA256Intrinsics +.RS 4 +Enables intrinsics for SHA\-224 and SHA\-256 crypto hash functions\&. +.RE +.PP +\-XX:+UseSHA512Intrinsics +.RS 4 +Enables intrinsics for SHA\-384 and SHA\-512 crypto hash functions\&. +.RE +.PP +\-XX:+UseSuperWord +.RS 4 +Enables the transformation of scalar operations into superword operations\&. This option is enabled by default\&. To disable the transformation of scalar operations into superword operations, specify +\fB\-XX:\-UseSuperWord\fR\&. Only the Java HotSpot Server VM supports this option\&. +.RE .SS "Advanced Serviceability Options" .PP These options provide the ability to gather system information and perform extensive debugging\&. @@ -2474,9 +2607,10 @@ .PP \-XX:+PrintConcurrentLocks .RS 4 -Enables printing of j locks after a event\&. By default, this option is disabled\&. +Enables printing of locks after a event\&. By default, this option is disabled\&. .sp -Enables printing of j\fBava\&.util\&.concurrent\fR +Enables printing of +\fBjava\&.util\&.concurrent\fR locks after a \fBControl+C\fR event (\fBSIGTERM\fR)\&. By default, this option is disabled\&. 
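One quick way to confirm how flags like these resolve on a particular machine is the standard -XX:+PrintFlagsFinal listing (the grep pattern here is arbitrary):

    java -XX:+PrintFlagsFinal -version | grep -E 'UseSHA|UseRTM'

This prints the final value of every matching flag after ergonomics and command-line processing have been applied.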
@@ -2678,7 +2812,9 @@ \fBg\fR or \fBG\fR -to indicate gigabytes\&. The default value is chosen at runtime based on system configuration\&. For more information, see Garbage Collector Ergonomics at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gc\-ergonomics\&.html +to indicate gigabytes\&. The default value is chosen at runtime based on system configuration\&. See the section "Ergonomics" in +\fIJava SE HotSpot Virtual Machine Garbage Collection Tuning Guide\fR +at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gctuning/index\&.html\&. .sp The following examples show how to set the size of allocated memory to 6 MB using various units: .sp @@ -2798,7 +2934,9 @@ \fB\-XX:InitialHeapSize\fR and \fB\-XX:MaxHeapSize\fR -are often set to the same value\&. For more information, see Garbage Collector Ergonomics at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gc\-ergonomics\&.html +are often set to the same value\&. See the section "Ergonomics" in +\fIJava SE HotSpot Virtual Machine Garbage Collection Tuning Guide\fR +at http://docs\&.oracle\&.com/javase/8/docs/technotes/guides/vm/gctuning/index\&.html\&. .sp The following examples show how to set the maximum allowed size of allocated memory to 80 MB using various units: .sp @@ -3238,6 +3376,13 @@ Enables the use of the serial garbage collector\&. This is generally the best choice for small and simple applications that do not require any special functionality from garbage collection\&. By default, this option is disabled and the collector is chosen automatically based on the configuration of the machine and type of the JVM\&. .RE .PP +\-XX:+UseSHM +.RS 4 +On Linux, enables the JVM to use shared memory to set up large pages\&. +.sp +For more information, see "Large Pages"\&. +.RE +.PP \-XX:+UseStringDeduplication .RS 4 Enables string deduplication\&. By default, this option is disabled\&. To use this option, you must enable the garbage\-first (G1) garbage collector\&. See the @@ -3352,9 +3497,7 @@ .PP The following examples show how to use experimental tuning flags to either optimize throughput or to provide lower response time\&. .PP -\fBExample 1\fR -.br -Tuning for Higher Throughput +\fBExample 1 \fRTuning for Higher Throughput .RS 4 .sp .if n \{\ @@ -3369,9 +3512,7 @@ .\} .RE .PP -\fBExample 2\fR -.br -Tuning for Lower Response Time +\fBExample 2 \fRTuning for Lower Response Time .RS 4 .sp .if n \{\ @@ -3385,6 +3526,195 @@ .RE .\} .RE +.SH "LARGE PAGES" +.PP +Also known as huge pages, large pages are memory pages that are significantly larger than the standard memory page size (which varies depending on the processor and operating system)\&. Large pages optimize processor Translation\-Lookaside Buffers\&. +.PP +A Translation\-Lookaside Buffer (TLB) is a page translation cache that holds the most\-recently used virtual\-to\-physical address translations\&. TLB is a scarce system resource\&. A TLB miss can be costly as the processor must then read from the hierarchical page table, which may require multiple memory accesses\&. By using a larger memory page size, a single TLB entry can represent a larger memory range\&. There will be less pressure on TLB, and memory\-intensive applications may have better performance\&. +.PP +However, large page memory can negatively affect system performance\&. For example, when a large amount of memory is pinned by an application, it may create a shortage of regular memory and cause excessive paging in other applications and slow down the entire system\&.
Also, a system that has been up for a long time could produce excessive fragmentation, which could make it impossible to reserve enough large page memory\&. When this happens, either the OS or JVM reverts to using regular pages\&. +.SS "Large Pages Support" +.PP +Solaris and Linux support large pages\&. +.sp +.it 1 an-trap +.nr an-no-space-flag 1 +.nr an-break-flag 1 +.br +.ps +1 +\fBSolaris\fR +.RS 4 +.PP +Solaris 9 and later include Multiple Page Size Support (MPSS); no additional configuration is necessary\&. See http://www\&.oracle\&.com/technetwork/server\-storage/solaris10/overview/solaris9\-features\-scalability\-135663\&.html\&. +.RE +.sp +.it 1 an-trap +.nr an-no-space-flag 1 +.nr an-break-flag 1 +.br +.ps +1 +\fBLinux\fR +.RS 4 +.PP +The 2\&.6 kernel supports large pages\&. Some vendors have backported the code to their 2\&.4\-based releases\&. To check if your system can support large page memory, try the following: +.sp +.if n \{\ +.RS 4 +.\} +.nf +\fB# cat /proc/meminfo | grep Huge\fR +\fBHugePages_Total: 0\fR +\fBHugePages_Free: 0\fR +\fBHugepagesize: 2048 kB\fR + +.fi +.if n \{\ +.RE +.\} +.PP +If the output shows the three "Huge" variables, then your system can support large page memory but it needs to be configured\&. If the command prints nothing, then your system does not support large pages\&. To configure the system to use large page memory, log in as +\fBroot\fR, and then follow these steps: +.sp +.RS 4 +.ie n \{\ +\h'-04' 1.\h'+01'\c +.\} +.el \{\ +.sp -1 +.IP " 1." 4.2 +.\} +If you are using the option +\fB\-XX:+UseSHM\fR +(instead of +\fB\-XX:+UseHugeTLBFS\fR), then increase the +\fBSHMMAX\fR +value\&. It must be larger than the Java heap size\&. On a system with 4 GB of physical RAM (or less), the following will make all the memory sharable: +.sp +.if n \{\ +.RS 4 +.\} +.nf +\fB# echo 4294967295 > /proc/sys/kernel/shmmax\fR + +.fi +.if n \{\ +.RE +.\} +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04' 2.\h'+01'\c +.\} +.el \{\ +.sp -1 +.IP " 2." 4.2 +.\} +If you are using the option +\fB\-XX:+UseSHM\fR +or +\fB\-XX:+UseHugeTLBFS\fR, then specify the number of large pages\&. In the following example, 3 GB of a 4 GB system are reserved for large pages (assuming a large page size of 2048 kB, then 3 GB = 3 * 1024 MB = 3072 MB = 3072 * 1024 kB = 3145728 kB and 3145728 kB / 2048 kB = 1536): +.sp +.if n \{\ +.RS 4 +.\} +.nf +\fB# echo 1536 > /proc/sys/vm/nr_hugepages\fR + +.fi +.if n \{\ +.RE +.\} +.RE +.if n \{\ +.sp +.\} +.RS 4 +.it 1 an-trap +.nr an-no-space-flag 1 +.nr an-break-flag 1 +.br +.ps +1 +\fBNote\fR +.ps -1 +.br +.TS +allbox tab(:); +l. +T{ +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +Note that the values contained in +\fB/proc\fR +will reset after you reboot your system, so you may want to set them in an initialization script (for example, +\fBrc\&.local\fR +or +\fBsysctl\&.conf\fR)\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +If you configure (or resize) the OS kernel parameters +\fB/proc/sys/kernel/shmmax\fR +or +\fB/proc/sys/vm/nr_hugepages\fR, Java processes may allocate large pages for areas in addition to the Java heap\&.
These steps can allocate large pages for the following areas: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +Java heap +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +Code cache +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +The marking bitmap data structure for the parallel GC +.RE +.sp +Consequently, if you configure the +\fBnr_hugepages\fR +parameter to the size of the Java heap, then the JVM can fail in allocating the code cache areas on large pages because these areas are quite large in size\&. +.RE +T} +.TE +.sp 1 +.sp .5v +.RE +.RE .SH "EXIT STATUS" .PP The following exit values are typically returned by the launcher when the launcher is called with the wrong arguments, serious errors, or exceptions thrown by the JVM\&. However, a Java application may choose to return any value by using the API call @@ -3456,6 +3786,17 @@ .\} jar(1) .RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +jstat(1) +.RE .br 'pl 8.5i 'bp --- ./jdk/src/bsd/doc/man/javac.1 Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/src/bsd/doc/man/javac.1 Wed Feb 04 12:14:43 2015 -0800 @@ -1,2116 +1,1370 @@ '\" t -.\" Copyright (c) 1994, 2014, Oracle and/or its affiliates. All rights reserved. -.\" -.\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -.\" -.\" This code is free software; you can redistribute it and/or modify it -.\" under the terms of the GNU General Public License version 2 only, as -.\" published by the Free Software Foundation. -.\" -.\" This code is distributed in the hope that it will be useful, but WITHOUT -.\" ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -.\" FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -.\" version 2 for more details (a copy is included in the LICENSE file that -.\" accompanied this code). -.\" -.\" You should have received a copy of the GNU General Public License version -.\" 2 along with this work; if not, write to the Free Software Foundation, -.\" Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -.\" -.\" Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -.\" or visit www.oracle.com if you need additional information or have any -.\" questions. -.\" -.\" Title: javac -.\" Language: English -.\" Date: 8 August 2014 -.\" SectDesc: Basic Tools -.\" Software: JDK 8 -.\" Arch: generic -.\" Part Number: E38207-03 +.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved. +.\" +.\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +.\" +.\" This code is free software; you can redistribute it and/or modify it +.\" under the terms of the GNU General Public License version 2 only, as +.\" published by the Free Software Foundation. +.\" +.\" This code is distributed in the hope that it will be useful, but WITHOUT +.\" ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +.\" FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +.\" version 2 for more details (a copy is included in the LICENSE file that +.\" accompanied this code). +.\" +.\" You should have received a copy of the GNU General Public License version +.\" 2 along with this work; if not, write to the Free Software Foundation, +.\" Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+.\" +.\" Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +.\" or visit www.oracle.com if you need additional information or have any +.\" questions. +.\" +.\" Arch: generic +.\" Software: JDK 8 +.\" Date: 03 March 2015 +.\" SectDesc: Basic Tools +.\" Title: javac.1 .\" .if n .pl 99999 -.TH "javac" "1" "8 August 2014" "JDK 8" "Basic Tools" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" +.TH javac 1 "03 March 2015" "JDK 8" "Basic Tools" +.\" ----------------------------------------------------------------- +.\" * Define some portability stuff +.\" ----------------------------------------------------------------- +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.\" http://bugs.debian.org/507673 +.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" ----------------------------------------------------------------- + +.SH NAME javac \- Reads Java class and interface definitions and compiles them into bytecode and class files\&. -.SH "SYNOPSIS" -.sp -.if n \{\ -.RS 4 -.\} -.nf +.SH SYNOPSIS +.sp +.nf + \fBjavac\fR [ \fIoptions\fR ] [ \fIsourcefiles\fR ] [ \fIclasses\fR] [ \fI@argfiles\fR ] -.fi -.if n \{\ -.RE -.\} +.fi +.sp +Arguments can be in any order: +.TP +\fIoptions\fR +Command-line options\&. See Options\&. +.TP +\fIsourcefiles\fR +One or more source files to be compiled (such as \f3MyClass\&.java\fR)\&. +.TP +\fIclasses\fR +One or more classes to be processed for annotations (such as \f3MyPackage\&.MyClass\fR)\&. +.TP +\fI@argfiles\fR +One or more files that list options and source files\&. The \f3-J\fR options are not allowed in these files\&. See Command-Line Argument Files\&. +.SH DESCRIPTION +The \f3javac\fR command reads class and interface definitions, written in the Java programming language, and compiles them into bytecode class files\&. The \f3javac\fR command can also process annotations in Java source files and classes\&. .PP -Arguments can be in any order: +There are two ways to pass source code file names to \f3javac\fR\&. +.TP 0.2i +\(bu +For a small number of source files, list the file names on the command line\&. 
+.TP 0.2i +\(bu +For a large number of source files, list the file names in a file, separated by blanks or line breaks\&. Then use the list file name, preceded by an at sign (@), with the \f3javac\fR command\&. .PP -\fIoptions\fR -.RS 4 -Command\-line options\&. See Options\&. -.RE +Source code file names must have \&.java suffixes, class file names must have \&.class suffixes, and both source and class files must have root names that identify the class\&. For example, a class called \f3MyClass\fR would be written in a source file called \f3MyClass\&.java\fR and compiled into a bytecode class file called \f3MyClass\&.class\fR\&. .PP -\fIsourcefiles\fR -.RS 4 -One or more source files to be compiled (such as -\fBMyClass\&.java\fR)\&. -.RE +Inner class definitions produce additional class files\&. These class files have names that combine the inner and outer class names, such as \f3MyClass$MyInnerClass\&.class\fR\&. .PP -\fIclasses\fR -.RS 4 -One or more classes to be processed for annotations (such as -\fBMyPackage\&.MyClass\fR)\&. -.RE +Arrange source files in a directory tree that reflects their package tree\&. For example, if all of your source files are in \f3/workspace\fR, then put the source code for \f3com\&.mysoft\&.mypack\&.MyClass\fR in \f3/workspace/com/mysoft/mypack/MyClass\&.java\fR\&. .PP -\fI@argfiles\fR -.RS 4 -One or more files that list options and source files\&. The -\fB\-J\fR -options are not allowed in these files\&. See Command\-Line Argument Files\&. -.RE -.SH "DESCRIPTION" +By default, the compiler puts each class file in the same directory as its source file\&. You can specify a separate destination directory with the \f3-d\fR option\&. +.SH OPTIONS +The compiler has a set of standard options that are supported on the current development environment\&. An additional set of nonstandard options is specific to the current virtual machine and compiler implementations and is subject to change in the future\&. Nonstandard options begin with the \f3-X\fR option\&. +.TP 0.2i +\(bu +See also Cross-Compilation Options +.TP 0.2i +\(bu +See also Nonstandard Options +.SS STANDARD\ OPTIONS +.TP +-A\fIkey\fR[\fI=value\fR] +.br +Specifies options to pass to annotation processors\&. These options are not interpreted by \f3javac\fR directly, but are made available for use by individual processors\&. The \f3key\fR value should be one or more identifiers separated by a dot (\&.)\&. +.TP +-cp \fIpath\fR or -classpath \fIpath\fR +.br +Specifies where to find user class files, and (optionally) annotation processors and source files\&. This class path overrides the user class path in the \f3CLASSPATH\fR environment variable\&. If none of \f3CLASSPATH\fR, \f3-cp\fR, or \f3-classpath\fR is specified, then the user \fIclass path\fR is the current directory\&. See Setting the Class Path\&. + +If the \f3-sourcepath\fR option is not specified, then the user class path is also searched for source files\&. + +If the \f3-processorpath\fR option is not specified, then the class path is also searched for annotation processors\&. +.TP +-Djava\&.ext\&.dirs=\fIdirectories\fR +.br +Overrides the location of installed extensions\&. +.TP +-Djava\&.endorsed\&.dirs=\fIdirectories\fR +.br +Overrides the location of the endorsed standards path\&. +.TP +-d \fIdirectory\fR +.br +Sets the destination directory for class files\&. The directory must already exist because \f3javac\fR does not create it\&.
If a class is part of a package, then \f3javac\fR puts the class file in a subdirectory that reflects the package name and creates directories as needed\&. + +If you specify \f3-d /home/myclasses\fR and the class is called \f3com\&.mypackage\&.MyClass\fR, then the class file is \f3/home/myclasses/com/mypackage/MyClass\&.class\fR\&. + +If the \fI-d\fR option is not specified, then \f3javac\fR puts each class file in the same directory as the source file from which it was generated\&. + +\fINote:\fR The directory specified by the \fI-d\fR option is not automatically added to your user class path\&. +.TP +-deprecation +.br +Shows a description of each use or override of a deprecated member or class\&. Without the \f3-deprecation\fR option, \f3javac\fR shows a summary of the source files that use or override deprecated members or classes\&. The \f3-deprecation\fR option is shorthand for \f3-Xlint:deprecation\fR\&. +.TP +-encoding \fIencoding\fR +.br +Sets the source file encoding name, such as EUC-JP and UTF-8\&. If the \f3-encoding\fR option is not specified, then the platform default converter is used\&. +.TP +-endorseddirs \fIdirectories\fR +.br +Overrides the location of the endorsed standards path\&. +.TP +-extdirs \fIdirectories\fR +.br +Overrides the location of the \f3ext\fR directory\&. The directories variable is a colon-separated list of directories\&. Each JAR file in the specified directories is searched for class files\&. All JAR files found become part of the class path\&. + +If you are cross-compiling (compiling classes against bootstrap and extension classes of a different Java platform implementation), then this option specifies the directories that contain the extension classes\&. See Cross-Compilation Options for more information\&. +.TP +-g +.br +Generates all debugging information, including local variables\&. By default, only line number and source file information is generated\&. +.TP +-g:none +.br +Does not generate any debugging information\&. +.TP +-g:[\fIkeyword list\fR] +.br +Generates only some kinds of debugging information, specified by a comma-separated list of keywords\&. Valid keywords are: +.RS +.TP +source +Source file debugging information\&. +.TP +lines +Line number debugging information\&. +.TP +vars +Local variable debugging information\&. +.RE + +.TP +-help +.br +Prints a synopsis of standard options\&. +.TP +-implicit:[\fIclass, none\fR] +.br +Controls the generation of class files for implicitly loaded source files\&. To automatically generate class files, use \f3-implicit:class\fR\&. To suppress class file generation, use \f3-implicit:none\fR\&. If this option is not specified, then the default is to automatically generate class files\&. In this case, the compiler issues a warning if any such class files are generated when also doing annotation processing\&. The warning is not issued when the \f3-implicit\fR option is set explicitly\&. See Searching for Types\&. +.TP +-J\fIoption\fR +.br +Passes \f3option\fR to the Java Virtual Machine (JVM), where option is one of the options described on the reference page for the Java launcher\&. For example, \f3-J-Xms48m\fR sets the startup memory to 48 MB\&. See java(1)\&. + +\fINote:\fR The \fICLASSPATH\fR, \f3-classpath\fR, \f3-bootclasspath\fR, and \f3-extdirs\fR options do not specify the classes used to run \f3javac\fR\&. Trying to customize the compiler implementation with these options and variables is risky and often does not accomplish what you want\&.
If you must customize the compiler implementation, then use the \f3-J\fR option to pass options through to the underlying Java launcher\&. +.TP +-nowarn +.br +Disables warning messages\&. This option operates the same as the \f3-Xlint:none\fR option\&. +.TP +-parameters +.br +Stores formal parameter names of constructors and methods in the generated class file so that the method \f3java\&.lang\&.reflect\&.Executable\&.getParameters\fR from the Reflection API can retrieve them\&. +.TP +-proc: [\fInone\fR, \fIonly\fR] +.br +Controls whether annotation processing and compilation are done\&. \f3-proc:none\fR means that compilation takes place without annotation processing\&. \f3-proc:only\fR means that only annotation processing is done, without any subsequent compilation\&. +.TP +-processor \fIclass1\fR [,\fIclass2\fR,\fIclass3\fR\&.\&.\&.] +.br +Names of the annotation processors to run\&. This bypasses the default discovery process\&. +.TP +-processorpath \fIpath\fR +.br +Specifies where to find annotation processors\&. If this option is not used, then the class path is searched for processors\&. +.TP +-s \fIdir\fR +.br +Specifies the directory in which to place the generated source files\&. The directory must already exist because \f3javac\fR does not create it\&. If a class is part of a package, then the compiler puts the source file in a subdirectory that reflects the package name and creates directories as needed\&. + +If you specify \f3-s /home/mysrc\fR and the class is called \f3com\&.mypackage\&.MyClass\fR, then the source file is put in \f3/home/mysrc/com/mypackage/MyClass\&.java\fR\&. +.TP +-source \fIrelease\fR +.br +Specifies the version of source code accepted\&. The following values for \f3release\fR are allowed: +.RS +.TP +1\&.3 +The compiler does not support assertions, generics, or other language features introduced after Java SE 1\&.3\&. +.TP +1\&.4 +The compiler accepts code containing assertions, which were introduced in Java SE 1\&.4\&. +.TP +1\&.5 +The compiler accepts code containing generics and other language features introduced in Java SE 5\&. +.TP +5 +Synonym for 1\&.5\&. +.TP +1\&.6 +No language changes were introduced in Java SE 6\&. However, encoding errors in source files are now reported as errors instead of warnings as in earlier releases of Java Platform, Standard Edition\&. +.TP +6 +Synonym for 1\&.6\&. +.TP +1\&.7 +The compiler accepts code with features introduced in Java SE 7\&. +.TP +7 +Synonym for 1\&.7\&. +.TP +1\&.8 +This is the default value\&. The compiler accepts code with features introduced in Java SE 8\&. +.TP +8 +Synonym for 1\&.8\&. +.RE + +.TP +-sourcepath \fIsourcepath\fR +.br +Specifies the source code path to search for class or interface definitions\&. As with the user class path, source path entries are separated by colons (:) on Oracle Solaris and semicolons on Windows and can be directories, JAR archives, or ZIP archives\&. If packages are used, then the local path name within the directory or archive must reflect the package name\&. + +\fINote:\fR Classes found through the class path might be recompiled when their source files are also found\&. See Searching for Types\&. +.TP +-verbose +.br +Uses verbose output, which includes information about each class loaded and each source file compiled\&. +.TP +-version +.br +Prints release information\&. +.TP +-werror +.br +Terminates compilation when warnings occur\&. +.TP +-X +.br +Displays information about nonstandard options and exits\&.
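+.PP +For example, the following invocation (the directory and file names here are illustrative only, not part of the tool) combines several standard options to compile with full debugging information, an explicit source path, and a separate destination directory: +.sp +.nf +\f3javac \-g \-d classes \-sourcepath src src/com/mysoft/mypack/MyClass\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp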
+.SS CROSS-COMPILATION\ OPTIONS +By default, classes are compiled against the bootstrap and extension classes of the platform that \f3javac\fR shipped with\&. But \f3javac\fR also supports cross-compiling, where classes are compiled against the bootstrap and extension classes of a different Java platform implementation\&. It is important to use the \f3-bootclasspath\fR and \f3-extdirs\fR options when cross-compiling, as shown in the sketch at the end of this section\&. +.TP +-target \fIversion\fR +.br +Generates class files that target a specified release of the virtual machine\&. Class files will run on the specified target and on later releases, but not on earlier releases of the JVM\&. Valid targets are 1\&.1, 1\&.2, 1\&.3, 1\&.4, 1\&.5 (also 5), 1\&.6 (also 6), 1\&.7 (also 7), and 1\&.8 (also 8)\&. + +The default for the \f3-target\fR option depends on the value of the \f3-source\fR option: +.RS +.TP 0.2i +\(bu +If the \f3-source\fR option is not specified, then the value of the \f3-target\fR option is 1\&.8 +.TP 0.2i +\(bu +If the \f3-source\fR option is 1\&.2, then the value of the \f3-target\fR option is 1\&.4 +.TP 0.2i +\(bu +If the \f3-source\fR option is 1\&.3, then the value of the \f3-target\fR option is 1\&.4 +.TP 0.2i +\(bu +If the \f3-source\fR option is 1\&.5, then the value of the \f3-target\fR option is 1\&.8 +.TP 0.2i +\(bu +If the \f3-source\fR option is 1\&.6, then the value of the \f3-target\fR option is 1\&.8 +.TP 0.2i +\(bu +If the \f3-source\fR option is 1\&.7, then the value of the \f3-target\fR option is 1\&.8 +.TP 0.2i +\(bu +For all other values of the \f3-source\fR option, the value of the \f3-target\fR option is the value of the \f3-source\fR option\&. +.RE + +.TP +-bootclasspath \fIbootclasspath\fR +.br +Cross-compiles against the specified set of boot classes\&. As with the user class path, boot class path entries are separated by colons (:) and can be directories, JAR archives, or ZIP archives\&.
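+.PP +As an illustrative sketch (the JDK 7 installation path shown is hypothetical, and \f3\-extdirs ""\fR empties the extension class path so that the newer platform\&'s extensions are not picked up), these options can be combined to cross-compile for an older platform: +.sp +.nf +\f3javac \-source 1\&.7 \-target 1\&.7 \-bootclasspath /opt/jdk1\&.7\&.0/jre/lib/rt\&.jar \-extdirs "" OldCode\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp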
+.SS COMPACT\ PROFILE\ OPTION +Beginning with JDK 8, the \f3javac\fR compiler supports compact profiles\&. With compact profiles, applications that do not require the entire Java platform can be deployed and run with a smaller footprint\&. The compact profiles feature could be used to shorten the download time for applications from app stores\&. This feature makes for more compact deployment of Java applications that bundle the JRE\&. This feature is also useful in small devices\&. .PP -The -\fBjavac\fR -command reads class and interface definitions, written in the Java programming language, and compiles them into bytecode class files\&. The -\fBjavac\fR -command can also process annotations in Java source files and classes\&. +The supported profile values are \f3compact1\fR, \f3compact2\fR, and \f3compact3\fR\&. These are additive layers\&. Each higher-numbered compact profile contains all of the APIs in lower-numbered profiles\&. +.TP +-profile +.br +When using compact profiles, this option specifies the profile name when compiling\&. For example: +.sp +.nf +\f3javac \-profile compact1 Hello\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp + + +javac does not compile source code that uses any Java SE APIs that are not in the specified profile\&. Here is an example of the error message that results from attempting to compile such source code: +.sp +.nf +\f3cd jdk1\&.8\&.0/bin\fP +.fi +.nf +\f3\&./javac \-profile compact1 Paint\&.java\fP +.fi +.nf +\f3Paint\&.java:5: error: Applet is not available in profile \&'compact1\&'\fP +.fi +.nf +\f3import java\&.applet\&.Applet;\fP +.fi +.nf +\f3\fP +.fi +.sp + + +In this example, you can correct the error by modifying the source to not use the \f3Applet\fR class\&. You could also correct the error by compiling without the -profile option\&. Then the compilation would be run against the full set of Java SE APIs\&. (None of the compact profiles include the \f3Applet\fR class\&.) + +An alternative way to compile with compact profiles is to use the \f3-bootclasspath\fR option to specify a path to an \f3rt\&.jar\fR file that specifies a profile\&'s image\&. Using the \f3-profile\fR option instead does not require a profile image to be present on the system at compile time\&. This is useful when cross-compiling\&. +.SS NONSTANDARD\ OPTIONS +.TP +-Xbootclasspath/p:\fIpath\fR +.br +Adds a prefix to the bootstrap class path\&. +.TP +-Xbootclasspath/a:\fIpath\fR +.br +Adds a suffix to the bootstrap class path\&. +.TP +-Xbootclasspath/:\fIpath\fR +.br +Overrides the location of the bootstrap class files\&. +.TP +-Xdoclint:[-]\fIgroup\fR [\fI/access\fR] +.br +Enables or disables specific groups of checks, where \fIgroup\fR is one of the following values: \f3accessibility\fR, \f3syntax\fR, \f3reference\fR, \f3html\fR, or \f3missing\fR\&. For more information about these groups of checks, see the \f3-Xdoclint\fR option of the \f3javadoc\fR command\&. The \f3-Xdoclint\fR option is disabled by default in the \f3javac\fR command\&. + +The variable \fIaccess\fR specifies the minimum visibility level of classes and members that the \f3-Xdoclint\fR option checks\&. It can have one of the following values (in order of most to least visible): \f3public\fR, \f3protected\fR, \f3package\fR and \f3private\fR\&. For example, the following option checks classes and members (with all groups of checks) that have the access level protected and higher (which includes protected, package and public): +.sp +.nf +\f3\-Xdoclint:all/protected\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The following option enables all groups of checks for all access levels, except it will not check for HTML errors for classes and members that have access level package and higher (which includes package and public): +.sp +.nf +\f3\-Xdoclint:all,\-html/package\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +-Xdoclint:none +.br +Disables all groups of checks\&. +.TP +-Xdoclint:all[\fI/access\fR] +.br +Enables all groups of checks\&. +.TP +-Xlint +.br +Enables all recommended warnings\&. In this release, enabling all available warnings is recommended\&. +.TP +-Xlint:all +.br +Enables all recommended warnings\&. In this release, enabling all available warnings is recommended\&. +.TP +-Xlint:none +.br +Disables all warnings\&. +.TP +-Xlint:\fIname\fR +.br +Enables warning \fIname\fR\&. See Enable or Disable Warnings with the -Xlint Option for a list of warnings you can enable with this option\&. +.TP +-Xlint:\fI-name\fR +.br +Disables warning \fIname\fR\&. See Enable or Disable Warnings with the -Xlint Option for a list of warnings that you can disable with this option\&. +.TP +-Xmaxerrs \fInumber\fR +.br +Sets the maximum number of errors to print\&. +.TP +-Xmaxwarns \fInumber\fR +.br +Sets the maximum number of warnings to print\&.
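+ +For example, the following invocation (the source file name is illustrative) limits diagnostic output to 5 errors and 10 warnings: +.sp +.nf +\f3javac \-Xmaxerrs 5 \-Xmaxwarns 10 MyClass\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp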
+.TP +-Xstdout \fIfilename\fR +.br +Sends compiler messages to the named file\&. By default, compiler messages go to \f3System\&.err\fR\&. +.TP +-Xprefer:[\fInewer,source\fR] +.br +Specifies which file to read when both a source file and class file are found for a type\&. (See Searching for Types)\&. If the \f3-Xprefer:newer\fR option is used, then it reads the newer of the source or class file for a type (default)\&. If the \f3-Xprefer:source\fR option is used, then it reads the source file\&. Use \f3-Xprefer:source\fR when you want to be sure that any annotation processors can access annotations declared with a retention policy of \f3SOURCE\fR\&. +.TP +-Xpkginfo:[\fIalways\fR,\fIlegacy\fR,\fInonempty\fR] +.br +Controls whether \f3javac\fR generates \f3package-info\&.class\fR files from package-info\&.java files\&. Possible mode arguments for this option include the following\&. +.RS +.TP +always +Always generate a \f3package-info\&.class\fR file for every \f3package-info\&.java\fR file\&. This option may be useful if you use a build system such as Ant, which checks that each \f3\&.java\fR file has a corresponding \f3\&.class\fR file\&. +.TP +legacy +Generate a \f3package-info\&.class\fR file only if package-info\&.java contains annotations\&. Don\&'t generate a \f3package-info\&.class\fR file if package-info\&.java only contains comments\&. + +\fINote:\fR A \f3package-info\&.class\fR file might be generated but be empty if all the annotations in the package-info\&.java file have \f3RetentionPolicy\&.SOURCE\fR\&. +.TP +nonempty +Generate a \f3package-info\&.class\fR file only if package-info\&.java contains annotations with \f3RetentionPolicy\&.CLASS\fR or \f3RetentionPolicy\&.RUNTIME\fR\&. +.RE + +.TP +-Xprint +.br +Prints a textual representation of specified types for debugging purposes\&. It performs neither annotation processing nor compilation\&. The format of the output could change\&. +.TP +-XprintProcessorInfo +.br +Prints information about which annotations a processor is asked to process\&. +.TP +-XprintRounds +.br +Prints information about initial and subsequent annotation processing rounds\&. +.SH ENABLE\ OR\ DISABLE\ WARNINGS\ WITH\ THE\ -XLINT\ OPTION +Enable warning \fIname\fR with the \f3-Xlint:name\fR option, where \f3name\fR is one of the following warning names\&. Note that you can disable a warning with the \f3-Xlint:-name\fR option\&. +.TP +cast +Warns about unnecessary and redundant casts, for example: +.sp +.nf +\f3String s = (String) "Hello!";\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +classfile +Warns about issues related to class file contents\&.
+.TP +deprecation +Warns about the use of deprecated items, for example: +.sp +.nf +\f3java\&.util\&.Date myDate = new java\&.util\&.Date();\fP +.fi +.nf +\f3int currentDay = myDate\&.getDay();\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The method \f3java\&.util\&.Date\&.getDay\fR has been deprecated since JDK 1\&.1\&. +.TP +dep-ann +Warns about items that are documented with an \f3@deprecated\fR Javadoc comment, but do not have a \f3@Deprecated\fR annotation, for example: +.sp +.nf +\f3/**\fP +.fi +.nf +\f3 * @deprecated As of Java SE 7, replaced by {@link #newMethod()}\fP +.fi +.nf +\f3 */\fP +.fi +.nf +\f3public static void deprecatedMethod() { }\fP +.fi +.nf +\f3public static void newMethod() { }\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +divzero +Warns about division by the constant integer 0, for example: +.sp +.nf +\f3int divideByZero = 42 / 0;\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +empty +Warns about empty statements after \f3if\fR statements, for example: +.sp +.nf +\f3class E {\fP +.fi +.nf +\f3 void m() {\fP +.fi +.nf +\f3 if (true) ;\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +fallthrough +Checks the switch blocks for fall-through cases and provides a warning message for any that are found\&. Fall-through cases are cases in a switch block, other than the last case in the block, whose code does not include a break statement, allowing code execution to fall through from that case to the next case\&. For example, the code following the case 1 label in this switch block does not end with a break statement: +.sp +.nf +\f3switch (x) {\fP +.fi +.nf +\f3case 1:\fP +.fi +.nf +\f3 System\&.out\&.println("1");\fP +.fi +.nf +\f3 // No break statement here\&.\fP +.fi +.nf +\f3case 2:\fP +.fi +.nf +\f3 System\&.out\&.println("2");\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +If the \f3-Xlint:fallthrough\fR option was used when compiling this code, then the compiler emits a warning about possible fall-through into case, with the line number of the case in question\&. +.TP +finally +Warns about \f3finally\fR clauses that cannot complete normally, for example: +.sp +.nf +\f3public static int m() {\fP +.fi +.nf +\f3 try {\fP +.fi +.nf +\f3 throw new NullPointerException();\fP +.fi +.nf +\f3 } catch (NullPointerException e) {\fP +.fi +.nf +\f3 System\&.err\&.println("Caught NullPointerException\&.");\fP +.fi +.nf +\f3 return 1;\fP +.fi +.nf +\f3 } finally {\fP +.fi +.nf +\f3 return 0;\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The compiler generates a warning for the \f3finally\fR block in this example\&. When the \f3int\fR method is called, it returns a value of 0\&. A \f3finally\fR block executes when the \f3try\fR block exits\&. In this example, when control is transferred to the \f3catch\fR block, the \f3int\fR method exits\&. However, the \f3finally\fR block must execute, so it is executed, even though control was transferred outside the method\&. +.TP +options +Warns about issues related to the use of command-line options\&. See Cross-Compilation Options\&. +.TP +overrides +Warns about issues regarding method overrides\&. For example, consider the following two classes: +.sp +.nf +\f3public class ClassWithVarargsMethod {\fP +.fi +.nf +\f3 void varargsMethod(String\&.\&.\&.
s) { }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3public class ClassWithOverridingMethod extends ClassWithVarargsMethod {\fP +.fi +.nf +\f3 @Override\fP +.fi +.nf +\f3 void varargsMethod(String[] s) { }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The compiler generates a warning similar to the following: +.sp +.nf +\f3warning: [override] varargsMethod(String[]) in ClassWithOverridingMethod \fP +.fi +.nf +\f3overrides varargsMethod(String\&.\&.\&.) in ClassWithVarargsMethod; overriding\fP +.fi +.nf +\f3method is missing \&'\&.\&.\&.\&'\fP +.fi +.nf +\f3\fP +.fi +.sp + + +When the compiler encounters a \f3varargs\fR method, it translates the \f3varargs\fR formal parameter into an array\&. In the method \f3ClassWithVarargsMethod\&.varargsMethod\fR, the compiler translates the \f3varargs\fR formal parameter \f3String\&.\&.\&. s\fR to the formal parameter \f3String[] s\fR, an array, which matches the formal parameter of the method \f3ClassWithOverridingMethod\&.varargsMethod\fR\&. Consequently, this example compiles\&. +.TP +path +Warns about invalid path elements and nonexistent path directories on the command line (with regard to the class path, the source path, and other paths)\&. Such warnings cannot be suppressed with the \f3@SuppressWarnings\fR annotation, for example: +.sp +.nf +\f3javac \-Xlint:path \-classpath /nonexistentpath Example\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +processing +Warns about issues regarding annotation processing\&. The compiler generates this warning when you have a class that has an annotation, and you use an annotation processor that cannot handle that type of annotation\&. For example, the following is a simple annotation processor: + +\fISource file AnnoProc\&.java\fR: +.sp +.nf +\f3import java\&.util\&.*;\fP +.fi +.nf +\f3import javax\&.annotation\&.processing\&.*;\fP +.fi +.nf +\f3import javax\&.lang\&.model\&.*;\fP +.fi +.nf +\f3import javax\&.lang\&.model\&.element\&.*;\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3@SupportedAnnotationTypes("NotAnno")\fP +.fi +.nf +\f3public class AnnoProc extends AbstractProcessor {\fP +.fi +.nf +\f3 public boolean process(Set<? extends TypeElement> elems, RoundEnvironment renv){\fP +.fi +.nf +\f3 return true;\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3 public SourceVersion getSupportedSourceVersion() {\fP +.fi +.nf +\f3 return SourceVersion\&.latest();\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +\fISource file AnnosWithoutProcessors\&.java\fR: +.sp +.nf +\f3@interface Anno { }\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3@Anno\fP +.fi +.nf +\f3class AnnosWithoutProcessors { }\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The following commands compile the annotation processor \f3AnnoProc\fR, then run this annotation processor against the source file \f3AnnosWithoutProcessors\&.java\fR: +.sp +.nf +\f3javac AnnoProc\&.java\fP +.fi +.nf +\f3javac \-cp \&. \-Xlint:processing \-processor AnnoProc \-proc:only AnnosWithoutProcessors\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp + + +When the compiler runs the annotation processor against the source file \f3AnnosWithoutProcessors\&.java\fR, it generates the following warning: +.sp +.nf +\f3warning: [processing] No processor claimed any of these annotations: Anno\fP +.fi +.nf +\f3\fP +.fi +.sp + + +To resolve this issue, you can rename the annotation defined and used in the class \f3AnnosWithoutProcessors\fR from \f3Anno\fR to \f3NotAnno\fR\&. +.TP +rawtypes +Warns about unchecked operations on raw types\&.
The following statement generates a \f3rawtypes\fR warning: +.sp +.nf +\f3void countElements(List l) { \&.\&.\&. }\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The following example does not generate a \f3rawtypes\fR warning: +.sp +.nf +\f3void countElements(List<?> l) { \&.\&.\&. }\fP +.fi +.nf +\f3\fP +.fi +.sp + + +\f3List\fR is a raw type\&. However, \f3List<?>\fR is an unbounded wildcard parameterized type\&. Because \f3List\fR is a parameterized interface, always specify its type argument\&. In this example, the \f3List\fR formal argument is specified with an unbounded wildcard (\f3?\fR) as its formal type parameter, which means that the \f3countElements\fR method can accept any instantiation of the \f3List\fR interface\&. +.TP +serial +Warns about missing \f3serialVersionUID\fR definitions on serializable classes, for example: +.sp +.nf +\f3public class PersistentTime implements Serializable\fP +.fi +.nf +\f3{\fP +.fi +.nf +\f3 private Date time;\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3 public PersistentTime() {\fP +.fi +.nf +\f3 time = Calendar\&.getInstance()\&.getTime();\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3 public Date getTime() {\fP +.fi +.nf +\f3 return time;\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The compiler generates the following warning: +.sp +.nf +\f3warning: [serial] serializable class PersistentTime has no definition of\fP +.fi +.nf +\f3serialVersionUID\fP +.fi +.nf +\f3\fP +.fi +.sp + + +If a serializable class does not explicitly declare a field named \f3serialVersionUID\fR, then the serialization runtime environment calculates a default \f3serialVersionUID\fR value for that class based on various aspects of the class, as described in the Java Object Serialization Specification\&. However, it is strongly recommended that all serializable classes explicitly declare \f3serialVersionUID\fR values because the default process of computing \f3serialVersionUID\fR values is highly sensitive to class details that can vary depending on compiler implementations, and as a result, might cause an unexpected \f3InvalidClassException\fR during deserialization\&. To guarantee a consistent \f3serialVersionUID\fR value across different Java compiler implementations, a serializable class must declare an explicit \f3serialVersionUID\fR value\&.
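+ +A minimal sketch of such an explicit declaration follows (the value 1L is arbitrary; any \f3long\fR constant is accepted): +.sp +.nf +\f3private static final long serialVersionUID = 1L;\fP +.fi +.nf +\f3\fP +.fi +.sp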
+.TP +static +Warns about issues relating to the use of statics, for example: +.sp +.nf +\f3class XLintStatic {\fP +.fi +.nf +\f3 static void m1() { }\fP +.fi +.nf +\f3 void m2() { this\&.m1(); }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +The compiler generates the following warning: +.sp +.nf +\f3warning: [static] static method should be qualified by type name, \fP +.fi +.nf +\f3XLintStatic, instead of by an expression\fP +.fi +.nf +\f3\fP +.fi +.sp + + +To resolve this issue, you can call the \f3static\fR method \f3m1\fR as follows: +.sp +.nf +\f3XLintStatic\&.m1();\fP +.fi +.nf +\f3\fP +.fi +.sp + + +Alternatively, you can remove the \f3static\fR keyword from the declaration of the method \f3m1\fR\&. +.TP +try +Warns about issues relating to use of \f3try\fR blocks, including try-with-resources statements\&. For example, a warning is generated for the following statement because the resource \f3ac\fR declared in the \f3try\fR block is not used: +.sp +.nf +\f3try ( AutoCloseable ac = getResource() ) { /* do nothing */ }\fP +.fi +.nf +\f3\fP +.fi +.sp + +.TP +unchecked +Gives more detail for unchecked conversion warnings that are mandated by the Java Language Specification, for example: +.sp +.nf +\f3List l = new ArrayList<Number>();\fP +.fi +.nf +\f3List<String> ls = l; // unchecked warning\fP +.fi +.nf +\f3\fP +.fi +.sp + + +During type erasure, the types \f3ArrayList<Number>\fR and \f3List<String>\fR become \f3ArrayList\fR and \f3List\fR, respectively\&. + +The variable \f3ls\fR has the parameterized type \f3List<String>\fR\&. When the \f3List\fR referenced by \f3l\fR is assigned to \f3ls\fR, the compiler generates an unchecked warning\&. At compile time, the compiler and JVM cannot determine whether \f3l\fR refers to a \f3List<String>\fR type\&. In this case, \f3l\fR does not refer to a \f3List<String>\fR type\&. As a result, heap pollution occurs\&. + +A heap pollution situation occurs when the \f3List\fR object \f3l\fR, whose static type is \f3List<Number>\fR, is assigned to another \f3List\fR object, \f3ls\fR, that has a different static type, \f3List<String>\fR\&. However, the compiler still allows this assignment\&. It must allow this assignment to preserve backward compatibility with releases of Java SE that do not support generics\&. Because of type erasure, \f3List<Number>\fR and \f3List<String>\fR both become \f3List\fR\&. Consequently, the compiler allows the assignment of the object \f3l\fR, which has a raw type of \f3List\fR, to the object \f3ls\fR\&. +.TP +varargs +Warns about unsafe usages of variable arguments (\f3varargs\fR) methods, in particular, those that contain non-reifiable arguments, for example: +.sp +.nf +\f3public class ArrayBuilder {\fP +.fi +.nf +\f3 public static <T> void addToList (List<T> listArg, T\&.\&.\&. elements) {\fP +.fi +.nf +\f3 for (T x : elements) {\fP +.fi +.nf +\f3 listArg\&.add(x);\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp + + +\fINote:\fR A non-reifiable type is a type whose type information is not fully available at runtime\&. + +The compiler generates the following warning for the definition of the method \f3ArrayBuilder\&.addToList\fR: +.sp +.nf +\f3warning: [varargs] Possible heap pollution from parameterized vararg type T\fP +.fi +.nf +\f3\fP +.fi +.sp + + +When the compiler encounters a varargs method, it translates the \f3varargs\fR formal parameter into an array\&. However, the Java programming language does not permit the creation of arrays of parameterized types\&. In the method \f3ArrayBuilder\&.addToList\fR, the compiler translates the \f3varargs\fR formal parameter \f3T\&.\&.\&.\fR elements to the formal parameter \f3T[]\fR elements, an array\&. However, because of type erasure, the compiler converts the \f3varargs\fR formal parameter to \f3Object[]\fR elements\&. Consequently, there is a possibility of heap pollution\&. +.SH COMMAND-LINE\ ARGUMENT\ FILES +To shorten or simplify the \f3javac\fR command, you can specify one or more files that contain arguments to the \f3javac\fR command (except \f3-J\fR options)\&. This enables you to create \f3javac\fR commands of any length on any operating system\&. .PP -There are two ways to pass source code file names to -\fBjavac\fR\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -For a small number of source files, list the file names on the command line\&.
-.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -For a large number of source files, list the file names in a file that is separated by blanks or line breaks\&. Use the list file name preceded by an at sign (@) with the -\fBjavac\fR -command\&. -.RE +An argument file can include \f3javac\fR options and source file names in any combination\&. The arguments within a file can be separated by spaces or new line characters\&. If a file name contains embedded spaces, then put the whole file name in double quotation marks\&. .PP -Source code file names must have \&.java suffixes, class file names must have \&.class suffixes, and both source and class files must have root names that identify the class\&. For example, a class called -\fBMyClass\fR -would be written in a source file called -\fBMyClass\&.java\fR -and compiled into a bytecode class file called -\fBMyClass\&.class\fR\&. +File Names within an argument file are relative to the current directory, not the location of the argument file\&. Wild cards (*) are not allowed in these lists (such as for specifying \f3*\&.java\fR)\&. Use of the at sign (@) to recursively interpret files is not supported\&. The \f3-J\fR options are not supported because they are passed to the launcher, which does not support argument files\&. .PP -Inner class definitions produce additional class files\&. These class files have names that combine the inner and outer class names, such as -\fBMyClass$MyInnerClass\&.class\fR\&. +When executing the \f3javac\fR command, pass in the path and name of each argument file with the at sign (@) leading character\&. When the \f3javac\fR command encounters an argument beginning with the at sign (@), it expands the contents of that file into the argument list\&. .PP -Arrange source files in a directory tree that reflects their package tree\&. For example, if all of your source files are in -\fB/workspace\fR, then put the source code for -\fBcom\&.mysoft\&.mypack\&.MyClass\fR -in -\fB/workspace/com/mysoft/mypack/MyClass\&.java\fR\&. +\f3Example 1 Single Argument File\fR .PP -By default, the compiler puts each class file in the same directory as its source file\&. You can specify a separate destination directory with the -\fB\-d\fR -option\&. -.SH "OPTIONS" +You could use a single argument file named \f3argfile\fR to hold all \f3javac\fR arguments: +.sp +.nf +\f3javac @argfile\fP +.fi +.nf +\f3\fP +.fi +.sp +This argument file could contain the contents of both files shown in Example 2 .PP -The compiler has a set of standard options that are supported on the current development environment\&. An additional set of nonstandard options are specific to the current virtual machine and compiler implementations and are subject to change in the future\&. Nonstandard options begin with the -\fB\-X\fR -option\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -See also Cross\-Compilation Options -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -See also Nonstandard Options -.RE -.SS "Standard Options" +\f3Example 2 Two Argument Files\fR .PP -\-A\fIkey\fR[\fI=value\fR] -.RS 4 -Specifies options to pass to annotation processors\&. These options are not interpreted by -\fBjavac\fR -directly, but are made available for use by individual processors\&. The -\fBkey\fR -value should be one or more identifiers separated by a dot (\&.)\&. 
-.RE +You can create two argument files: one for the \f3javac\fR options and the other for the source file names\&. Note that the following lists have no line-continuation characters\&. .PP -\-cp \fIpath\fR or \-classpath \fIpath\fR -.RS 4 -Specifies where to find user class files, and (optionally) annotation processors and source files\&. This class path overrides the user class path in the -\fBCLASSPATH\fR -environment variable\&. If neither -\fBCLASSPATH\fR, -\fB\-cp\fR -nor -\fB\-classpath\fR -is specified, then the user -\fIclass path\fR -is the current directory\&. See Setting the Class Path \&. -.sp -If the -\fB\-sourcepath\fR -option is not specified, then the user class path is also searched for source files\&. -.sp -If the -\fB\-processorpath\fR -option is not specified, then the class path is also searched for annotation processors\&. -.RE +Create a file named options that contains the following: +.sp +.nf +\f3\-d classes\fP +.fi +.nf +\f3\-g\fP +.fi +.nf +\f3\-sourcepath /java/pubs/ws/1\&.3/src/share/classes\fP +.fi +.nf +\f3\fP +.fi +.sp +Create a file named classes that contains the following: +.sp +.nf +\f3MyClass1\&.java\fP +.fi +.nf +\f3MyClass2\&.java\fP +.fi +.nf +\f3MyClass3\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +Then, run the \f3javac\fR command as follows: +.sp +.nf +\f3javac @options @classes\fP +.fi +.nf +\f3\fP +.fi +.sp +\f3Example 3 Argument Files with Paths\fR .PP -\-Djava\&.ext\&.dirs=\fIdirectories\fR -.RS 4 -Overrides the location of installed extensions\&. -.RE +The argument files can have paths, but any file names inside the files are relative to the current working directory (not \f3path1\fR or \f3path2\fR): +.sp +.nf +\f3javac @path1/options @path2/classes\fP +.fi +.nf +\f3\fP +.fi +.sp +.SH ANNOTATION\ PROCESSING +The \f3javac\fR command provides direct support for annotation processing, superseding the need for the separate annotation processing command, \f3apt\fR\&. .PP -\-Djava\&.endorsed\&.dirs=\fIdirectories\fR -.RS 4 -Overrides the location of the endorsed standards path\&. -.RE -.PP -\-d \fIdirectory\fR -.RS 4 -Sets the destination directory for class files\&. The directory must already exist because -\fBjavac\fR -does not create it\&. If a class is part of a package, then -\fBjavac\fR -puts the class file in a subdirectory that reflects the package name and creates directories as needed\&. -.sp -If you specify -\fB\-d\fR -\fB/home/myclasses\fR -and the class is called -\fBcom\&.mypackage\&.MyClass\fR, then the class file is -\fB/home/myclasses/com/mypackage/MyClass\&.class\fR\&. -.sp -If the -\fI\-d\fR -option is not specified, then -\fBjavac\fR -puts each class file in the same directory as the source file from which it was generated\&. -.sp -\fBNote:\fR -The directory specified by the -\fI\-d\fR -option is not automatically added to your user class path\&. -.RE -.PP -\-deprecation -.RS 4 -Shows a description of each use or override of a deprecated member or class\&. Without the -\fB\-deprecation\fR -option, -\fBjavac\fR -shows a summary of the source files that use or override deprecated members or classes\&. The -\fB\-deprecation\fR -option is shorthand for -\fB\-Xlint:deprecation\fR\&. -.RE -.PP -\-encoding \fIencoding\fR -.RS 4 -Sets the source file encoding name, such as EUC\-JP and UTF\-8\&. If the -\fB\-encoding\fR -option is not specified, then the platform default converter is used\&. -.RE -.PP -\-endorseddirs \fIdirectories\fR -.RS 4 -Overrides the location of the endorsed standards path\&. 
-.RE -.PP -\-extdirs \fIdirectories\fR -.RS 4 -Overrides the location of the -\fBext\fR -directory\&. The directories variable is a colon\-separated list of directories\&. Each JAR file in the specified directories is searched for class files\&. All JAR files found become part of the class path\&. -.sp -If you are cross\-compiling (compiling classes against bootstrap and extension classes of a different Java platform implementation), then this option specifies the directories that contain the extension classes\&. See Cross\-Compilation Options for more information\&. -.RE -.PP -\-g -.RS 4 -Generates all debugging information, including local variables\&. By default, only line number and source file information is generated\&. -.RE -.PP -\-g:none -.RS 4 -Does not generate any debugging information\&. -.RE -.PP -\-g:[\fIkeyword list\fR] -.RS 4 -Generates only some kinds of debugging information, specified by a comma separated list of keywords\&. Valid keywords are: -.PP -source -.RS 4 -Source file debugging information\&. -.RE -.PP -lines -.RS 4 -Line number debugging information\&. -.RE -.PP -vars -.RS 4 -Local variable debugging information\&. -.RE -.RE -.PP -\-help -.RS 4 -Prints a synopsis of standard options\&. -.RE -.PP -\-implicit:[\fIclass, none\fR] -.RS 4 -Controls the generation of class files for implicitly loaded source files\&. To automatically generate class files, use -\fB\-implicit:class\fR\&. To suppress class file generation, use -\fB\-implicit:none\fR\&. If this option is not specified, then the default is to automatically generate class files\&. In this case, the compiler issues a warning if any such class files are generated when also doing annotation processing\&. The warning is not issued when the -\fB\-implicit\fR -option is set explicitly\&. See Searching for Types\&. -.RE -.PP -\-J\fIoption\fR -.RS 4 -Passes -\fBoption\fR -to the Java Virtual Machine (JVM), where option is one of the options described on the reference page for the Java launcher\&. For example, -\fB\-J\-Xms48m\fR -sets the startup memory to 48 MB\&. See -java(1)\&. -.sp -\fBNote:\fR -The -\fICLASSPATH\fR, -\fB\-classpath\fR, -\fB\-bootclasspath\fR, and -\fB\-extdirs\fR -options do not specify the classes used to run -\fBjavac\fR\&. Trying to customize the compiler implementation with these options and variables is risky and often does not accomplish what you want\&. If you must customize the complier implementation, then use the -\fB\-J\fR -option to pass options through to the underlying Java launcher\&. -.RE -.PP -\-nowarn -.RS 4 -Disables warning messages\&. This option operates the same as the -\fB\-Xlint:none\fR -option\&. -.RE -.PP -\-parameters -.RS 4 -Stores formal parameter names of constructors and methods in the generated class file so that the method -\fBjava\&.lang\&.reflect\&.Executable\&.getParameters\fR -from the Reflection API can retrieve them\&. -.RE -.PP -\-proc: [\fInone\fR, \fIonly\fR] -.RS 4 -Controls whether annotation processing and compilation are done\&. -\fB\-proc:none\fR -means that compilation takes place without annotation processing\&. -\fB\-proc:only\fR -means that only annotation processing is done, without any subsequent compilation\&. -.RE -.PP -\-processor \fIclass1\fR [,\fIclass2\fR,\fIclass3\fR\&.\&.\&.] -.RS 4 -Names of the annotation processors to run\&. This bypasses the default discovery process\&. -.RE -.PP -\-processorpath \fIpath\fR -.RS 4 -Specifies where to find annotation processors\&. 
If this option is not used, then the class path is searched for processors\&. -.RE -.PP -\-s \fIdir\fR -.RS 4 -Specifies the directory where to place the generated source files\&. The directory must already exist because -\fBjavac\fR -does not create it\&. If a class is part of a package, then the compiler puts the source file in a subdirectory that reflects the package name and creates directories as needed\&. -.sp -If you specify -\fB\-s /home/mysrc\fR -and the class is called -\fBcom\&.mypackage\&.MyClass\fR, then the source file is put in -\fB/home/mysrc/com/mypackage/MyClass\&.java\fR\&. -.RE -.PP -\-source \fIrelease\fR -.RS 4 -Specifies the version of source code accepted\&. The following values for -\fBrelease\fR -are allowed: -.PP -1\&.3 -.RS 4 -The compiler does not support assertions, generics, or other language features introduced after Java SE 1\&.3\&. -.RE -.PP -1\&.4 -.RS 4 -The compiler accepts code containing assertions, which were introduced in Java SE 1\&.4\&. -.RE -.PP -1\&.5 -.RS 4 -The compiler accepts code containing generics and other language features introduced in Java SE 5\&. -.RE -.PP -5 -.RS 4 -Synonym for 1\&.5\&. -.RE -.PP -1\&.6 -.RS 4 -No language changes were introduced in Java SE 6\&. However, encoding errors in source files are now reported as errors instead of warnings as in earlier releases of Java Platform, Standard Edition\&. -.RE -.PP -6 -.RS 4 -Synonym for 1\&.6\&. -.RE -.PP -1\&.7 -.RS 4 -The compiler accepts code with features introduced in Java SE 7\&. -.RE -.PP -7 -.RS 4 -Synonym for 1\&.7\&. -.RE -.PP -1\&.8 -.RS 4 -This is the default value\&. The compiler accepts code with features introduced in Java SE 8\&. -.RE -.PP -8 -.RS 4 -Synonym for 1\&.8\&. -.RE -.RE -.PP -\-sourcepath \fIsourcepath\fR -.RS 4 -Specifies the source code path to search for class or interface definitions\&. As with the user class path, source path entries are separated by colons (:) on Oracle Solaris and semicolons on Windows and can be directories, JAR archives, or ZIP archives\&. If packages are used, then the local path name within the directory or archive must reflect the package name\&. -.sp -\fBNote:\fR -Classes found through the class path might be recompiled when their source files are also found\&. See Searching for Types\&. -.RE -.PP -\-verbose -.RS 4 -Uses verbose output, which includes information about each class loaded and each source file compiled\&. -.RE -.PP -\-version -.RS 4 -Prints release information\&. -.RE -.PP -\-werror -.RS 4 -Terminates compilation when warnings occur\&. -.RE -.PP -\-X -.RS 4 -Displays information about nonstandard options and exits\&. -.RE -.SS "Cross\-Compilation Options" -.PP -By default, classes are compiled against the bootstrap and extension classes of the platform that -\fBjavac\fR -shipped with\&. But -\fBjavac\fR -also supports cross\-compiling, where classes are compiled against a bootstrap and extension classes of a different Java platform implementation\&. It is important to use the -\fB\-bootclasspath\fR -and -\fB\-extdirs\fR -options when cross\-compiling\&. -.PP -\-target \fIversion\fR -.RS 4 -Generates class files that target a specified release of the virtual machine\&. Class files will run on the specified target and on later releases, but not on earlier releases of the JVM\&. Valid targets are 1\&.1, 1\&.2, 1\&.3, 1\&.4, 1\&.5 (also 5), 1\&.6 (also 6), 1\&.7 (also 7), and 1\&.8 (also 8)\&. 
-.sp -The default for the -\fB\-target\fR -option depends on the value of the -\fB\-source\fR -option: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is not specified, then the value of the -\fB\-target\fR -option is 1\&.8 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is 1\&.2, then the value of the -\fB\-target\fR -option is 1\&.4 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is 1\&.3, then the value of the -\fB\-target\fR -option is 1\&.4 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is 1\&.5, then the value of the -\fB\-target\fR -option is 1\&.8 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is 1\&.6, then the value of the -\fB\-target\fR -is option 1\&.8 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -If the -\fB\-source\fR -option is 1\&.7, then the value of the -\fB\-target\fR -is option 1\&.8 -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -For all other values of the -\fB\-source\fR -option, the value of the -\fB\-target\fR -option is the value of the -\fB\-source\fR -option\&. -.RE -.RE -.PP -\-bootclasspath \fIbootclasspath\fR -.RS 4 -Cross\-compiles against the specified set of boot classes\&. As with the user class path, boot class path entries are separated by colons (:) and can be directories, JAR archives, or ZIP archives\&. -.RE -.SS "Compact Profile Option" -.PP -Beginning with JDK 8, the -\fBjavac\fR -compiler supports compact profiles\&. With compact profiles, applications that do not require the entire Java platform can be deployed and run with a smaller footprint\&. The compact profiles feature could be used to shorten the download time for applications from app stores\&. This feature makes for more compact deployment of Java applications that bundle the JRE\&. This feature is also useful in small devices\&. -.PP -The supported profile values are -\fBcompact1\fR, -\fBcompact2\fR, and -\fBcompact3\fR\&. These are additive layers\&. Each higher\-numbered compact profile contains all of the APIs in profiles with smaller number names\&. -.PP -\-profile -.RS 4 -When using compact profiles, this option specifies the profile name when compiling\&. For example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-profile compact1 Hello\&.java\fR - -.fi -.if n \{\ -.RE -.\} -javac does not compile source code that uses any Java SE APIs that is not in the specified profile\&. Here is an example of the error message that results from attempting to compile such source code: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBcd jdk1\&.8\&.0/bin\fR -\fB\&./javac \-profile compact1 Paint\&.java\fR -\fBPaint\&.java:5: error: Applet is not available in profile \*(Aqcompact1\*(Aq\fR -\fBimport java\&.applet\&.Applet;\fR - -.fi -.if n \{\ -.RE -.\} -In this example, you can correct the error by modifying the source to not use the -\fBApplet\fR -class\&. You could also correct the error by compiling without the \-profile option\&. Then the compilation would be run against the full set of Java SE APIs\&. (None of the compact profiles include the -\fBApplet\fR -class\&.) 
-.sp -An alternative way to compile with compact profiles is to use the -\fB\-bootclasspath\fR -option to specify a path to an -\fBrt\&.jar\fR -file that specifies a profile\*(Aqs image\&. Using the -\fB\-profile\fR -option instead does not require a profile image to be present on the system at compile time\&. This is useful when cross\-compiling\&. -.RE -.SS "Nonstandard Options" -.PP -\-Xbootclasspath/p:\fIpath\fR -.RS 4 -Adds a suffix to the bootstrap class path\&. -.RE -.PP -\-Xbootclasspath/a:\fIpath\fR -.RS 4 -Adds a prefix to the bootstrap class path\&. -.RE -.PP -\-Xbootclasspath/:\fIpath\fR -.RS 4 -Overrides the location of the bootstrap class files\&. -.RE -.PP -\-Xdoclint:[\-]\fIgroup\fR [\fI/access\fR] -.RS 4 -Enables or disables specific groups of checks, where -\fIgroup\fR -is one of the following values: -\fBaccessibility\fR, -\fBsyntax\fR, -\fBreference\fR, -\fBhtml\fR -or -\fBmissing\fR\&. For more information about these groups of checks see the -\fB\-Xdoclint\fR -option of the -\fBjavadoc\fR -command\&. The -\fB\-Xdoclint\fR -option is disabled by default in the -\fBjavac\fR -command\&. -.sp -The variable -\fIaccess\fR -specifies the minimum visibility level of classes and members that the -\fB\-Xdoclint\fR -option checks\&. It can have one of the following values (in order of most to least visible) : -\fBpublic\fR, -\fBprotected\fR, -\fBpackage\fR -and -\fBprivate\fR\&. For example, the following option checks classes and members (with all groups of checks) that have the access level protected and higher (which includes protected, package and public): -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB\-Xdoclint:all/protected\fR - -.fi -.if n \{\ -.RE -.\} -The following option enables all groups of checks for all access levels, except it will not check for HTML errors for classes and members that have access level package and higher (which includes package and public): -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB\-Xdoclint:all,\-html/package\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -\-Xdoclint:none -.RS 4 -Disables all groups of checks\&. -.RE -.PP -\-Xdoclint:all[\fI/access\fR] -.RS 4 -Enables all groups of checks\&. -.RE -.PP -\-Xlint -.RS 4 -Enables all recommended warnings\&. In this release, enabling all available warnings is recommended\&. -.RE -.PP -\-Xlint:all -.RS 4 -Enables all recommended warnings\&. In this release, enabling all available warnings is recommended\&. -.RE -.PP -\-Xlint:none -.RS 4 -Disables all warnings\&. -.RE -.PP -\-Xlint:\fIname\fR -.RS 4 -Disables warning name\&. See Enable or Disable Warnings with the \-Xlint Option for a list of warnings you can disable with this option\&. -.RE -.PP -\-Xlint:\fI\-name\fR -.RS 4 -Disables warning name\&. See Enable or Disable Warnings with the \-Xlint Option with the -\fB\-Xlint\fR -option to get a list of warnings that you can disable with this option\&. -.RE -.PP -\-Xmaxerrs \fInumber\fR -.RS 4 -Sets the maximum number of errors to print\&. -.RE -.PP -\-Xmaxwarns \fInumber\fR -.RS 4 -Sets the maximum number of warnings to print\&. -.RE -.PP -\-Xstdout \fIfilename\fR -.RS 4 -Sends compiler messages to the named file\&. By default, compiler messages go to -\fBSystem\&.err\fR\&. -.RE -.PP -\-Xprefer:[\fInewer,source\fR] -.RS 4 -Specifies which file to read when both a source file and class file are found for a type\&. (See Searching for Types)\&. If the -\fB\-Xprefer:newer\fR -option is used, then it reads the newer of the source or class file for a type (default)\&. 
If the -\fB\-Xprefer:source\fR -option is used, then it reads the source file\&. Use \-\fBXprefer:source\fR -when you want to be sure that any annotation processors can access annotations declared with a retention policy of -\fBSOURCE\fR\&. -.RE -.PP -\-Xpkginfo:[\fIalways\fR,\fIlegacy\fR,\fInonempty\fR] -.RS 4 -Control whether javac generates -\fBpackage\-info\&.class\fR -files from package\-info\&.java files\&. Possible mode arguments for this option include the following\&. -.PP -always -.RS 4 -Always generate a -\fBpackage\-info\&.class\fR -file for every -\fBpackage\-info\&.java\fR -file\&. This option may be useful if you use a build system such as Ant, which checks that each -\fB\&.java\fR -file has a corresponding -\fB\&.class\fR -file\&. -.RE -.PP -legacy -.RS 4 -Generate a -\fBpackage\-info\&.class\fR -file only if package\-info\&.java contains annotations\&. Don\*(Aqt generate a -\fBpackage\-info\&.class\fR -file if package\-info\&.java only contains comments\&. -.sp -\fBNote:\fR -A -\fBpackage\-info\&.class\fR -file might be generated but be empty if all the annotations in the package\-info\&.java file have -\fBRetentionPolicy\&.SOURCE\fR\&. -.RE -.PP -nonempty -.RS 4 -Generate a -\fBpackage\-info\&.class\fR -file only if package\-info\&.java contains annotations with -\fBRetentionPolicy\&.CLASS\fR -or -\fBRetentionPolicy\&.RUNTIME\fR\&. -.RE -.RE -.PP -\-Xprint -.RS 4 -Prints a textual representation of specified types for debugging purposes\&. Perform neither annotation processing nor compilation\&. The format of the output could change\&. -.RE -.PP -\-XprintProcessorInfo -.RS 4 -Prints information about which annotations a processor is asked to process\&. -.RE -.PP -\-XprintRounds -.RS 4 -Prints information about initial and subsequent annotation processing rounds\&. -.RE -.SH "ENABLE OR DISABLE WARNINGS WITH THE -XLINT OPTION" -.PP -Enable warning -\fIname\fR -with the -\fB\-Xlint:name\fR -option, where -\fBname\fR -is one of the following warning names\&. Note that you can disable a warning with the -\fB\-Xlint:\-name:\fR -option\&. -.PP -cast -.RS 4 -Warns about unnecessary and redundant casts, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBString s = (String) "Hello!"\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -classfile -.RS 4 -Warns about issues related to class file contents\&. 
-.RE -.PP -deprecation -.RS 4 -Warns about the use of deprecated items, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjava\&.util\&.Date myDate = new java\&.util\&.Date();\fR -\fBint currentDay = myDate\&.getDay();\fR - -.fi -.if n \{\ -.RE -.\} -The method -\fBjava\&.util\&.Date\&.getDay\fR -has been deprecated since JDK 1\&.1 -.RE -.PP -dep\-ann -.RS 4 -Warns about items that are documented with an -\fB@deprecated\fR -Javadoc comment, but do not have a -\fB@Deprecated\fR -annotation, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB/**\fR -\fB * @deprecated As of Java SE 7, replaced by {@link #newMethod()}\fR -\fB */\fR -\fBpublic static void deprecatedMethood() { }\fR -\fBpublic static void newMethod() { }\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -divzero -.RS 4 -Warns about division by the constant integer 0, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBint divideByZero = 42 / 0;\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -empty -.RS 4 -Warns about empty statements after -\fBif \fRstatements, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBclass E {\fR -\fB void m() {\fR -\fB if (true) ;\fR -\fB }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -fallthrough -.RS 4 -Checks the switch blocks for fall\-through cases and provides a warning message for any that are found\&. Fall\-through cases are cases in a switch block, other than the last case in the block, whose code does not include a break statement, allowing code execution to fall through from that case to the next case\&. For example, the code following the case 1 label in this switch block does not end with a break statement: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBswitch (x) {\fR -\fBcase 1:\fR -\fB System\&.out\&.println("1");\fR -\fB // No break statement here\&.\fR -\fBcase 2:\fR -\fB System\&.out\&.println("2");\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -If the -\fB\-Xlint:fallthrough\fR -option was used when compiling this code, then the compiler emits a warning about possible fall\-through into case, with the line number of the case in question\&. -.RE -.PP -finally -.RS 4 -Warns about -\fBfinally\fR -clauses that cannot complete normally, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic static int m() {\fR -\fB try {\fR -\fB throw new NullPointerException();\fR -\fB } catch (NullPointerException(); {\fR -\fB System\&.err\&.println("Caught NullPointerException\&.");\fR -\fB return 1;\fR -\fB } finally {\fR -\fB return 0;\fR -\fB }\fR -\fB }\fR - -.fi -.if n \{\ -.RE -.\} -The compiler generates a warning for the -\fBfinally\fR -block in this example\&. When the -\fBint\fR -method is called, it returns a value of 0\&. A -\fBfinally\fR -block executes when the -\fBtry\fR -block exits\&. In this example, when control is transferred to the -\fBcatch\fR -block, the -\fBint\fR -method exits\&. However, the -\fBfinally\fR -block must execute, so it is executed, even though control was transferred outside the method\&. -.RE -.PP -options -.RS 4 -Warns about issues that related to the use of command\-line options\&. See Cross\-Compilation Options\&. -.RE -.PP -overrides -.RS 4 -Warns about issues regarding method overrides\&. For example, consider the following two classes: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic class ClassWithVarargsMethod {\fR -\fB void varargsMethod(String\&.\&.\&. 
-.PP -overrides -.RS 4 -Warns about issues regarding method overrides\&. For example, consider the following two classes: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic class ClassWithVarargsMethod {\fR -\fB void varargsMethod(String\&.\&.\&. s) { }\fR -\fB}\fR - -\fBpublic class ClassWithOverridingMethod extends ClassWithVarargsMethod {\fR -\fB @Override\fR -\fB void varargsMethod(String[] s) { }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -The compiler generates a warning similar to the following: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBwarning: [override] varargsMethod(String[]) in ClassWithOverridingMethod \fR -\fBoverrides varargsMethod(String\&.\&.\&.) in ClassWithVarargsMethod; overriding\fR -\fBmethod is missing \*(Aq\&.\&.\&.\*(Aq\fR - -.fi -.if n \{\ -.RE -.\} -When the compiler encounters a -\fBvarargs\fR -method, it translates the -\fBvarargs\fR -formal parameter into an array\&. In the method -\fBClassWithVarargsMethod\&.varargsMethod\fR, the compiler translates the -\fBvarargs\fR -formal parameter -\fBString\&.\&.\&. s\fR -to the formal parameter -\fBString[] s\fR, an array, which matches the formal parameter of the method -\fBClassWithOverridingMethod\&.varargsMethod\fR\&. Consequently, this example compiles\&. -.RE -.PP -path -.RS 4 -Warns about invalid path elements and nonexistent path directories on the command line (with regard to the class path, the source path, and other paths)\&. Such warnings cannot be suppressed with the -\fB@SuppressWarnings\fR -annotation, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-Xlint:path \-classpath /nonexistentpath Example\&.java\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -processing -.RS 4 -Warns about issues regarding annotation processing\&. The compiler generates this warning when you have a class that has an annotation, and you use an annotation processor that cannot handle that type of annotation\&. For example, the following is a simple annotation processor: -.sp -\fBSource file AnnoProc\&.java\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBimport java\&.util\&.*;\fR -\fBimport javax\&.annotation\&.processing\&.*;\fR -\fBimport javax\&.lang\&.model\&.*;\fR -\fBimport javax\&.lang\&.model\&.element\&.*;\fR - -\fB@SupportedAnnotationTypes("NotAnno")\fR -\fBpublic class AnnoProc extends AbstractProcessor {\fR -\fB public boolean process(Set<? extends TypeElement> elems, RoundEnvironment renv){\fR -\fB return true;\fR -\fB }\fR - -\fB public SourceVersion getSupportedSourceVersion() {\fR -\fB return SourceVersion\&.latest();\fR -\fB }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -\fBSource file AnnosWithoutProcessors\&.java\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB@interface Anno { }\fR -\fB \fR -\fB@Anno\fR -\fBclass AnnosWithoutProcessors { }\fR - -.fi -.if n \{\ -.RE -.\} -The following commands compile the annotation processor -\fBAnnoProc\fR, then run this annotation processor against the source file -\fBAnnosWithoutProcessors\&.java\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac AnnoProc\&.java\fR -\fBjavac \-cp \&. \-Xlint:processing \-processor AnnoProc \-proc:only AnnosWithoutProcessors\&.java\fR - -.fi -.if n \{\ -.RE -.\} -When the compiler runs the annotation processor against the source file -\fBAnnosWithoutProcessors\&.java\fR, it generates the following warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBwarning: [processing] No processor claimed any of these annotations: Anno\fR -\fB \fR -.fi -.if n \{\ -.RE -.\} -To resolve this issue, you can rename the annotation defined and used in the class -\fBAnnosWithoutProcessors\fR -from -\fBAnno\fR -to -\fBNotAnno\fR\&. -.RE -.PP -rawtypes -.RS 4 -Warns about unchecked operations on raw types\&. The following statement generates a -\fBrawtypes\fR -warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBvoid countElements(List l) { \&.\&.\&.
}\fR - -.fi -.if n \{\ -.RE -.\} -The following example does not generate a -\fBrawtypes\fR -warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBvoid countElements(List<?> l) { \&.\&.\&. }\fR - -.fi -.if n \{\ -.RE -.\} -\fBList\fR -is a raw type\&. However, -\fBList<?>\fR -is an unbounded wildcard parameterized type\&. Because -\fBList\fR -is a parameterized interface, always specify its type argument\&. In this example, the -\fBList\fR -formal argument is specified with an unbounded wildcard (\fB?\fR) as its formal type parameter, which means that the -\fBcountElements\fR -method can accept any instantiation of the -\fBList\fR -interface\&. -.RE -.PP -serial -.RS 4 -Warns about missing -\fBserialVersionUID\fR -definitions on serializable classes, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic class PersistentTime implements Serializable\fR -\fB{\fR -\fB private Date time;\fR -\fB \fR -\fB public PersistentTime() {\fR -\fB time = Calendar\&.getInstance()\&.getTime();\fR -\fB }\fR -\fB \fR -\fB public Date getTime() {\fR -\fB return time;\fR -\fB }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -The compiler generates the following warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBwarning: [serial] serializable class PersistentTime has no definition of\fR -\fBserialVersionUID\fR - -.fi -.if n \{\ -.RE -.\} -If a serializable class does not explicitly declare a field named -\fBserialVersionUID\fR, then the serialization runtime environment calculates a default -\fBserialVersionUID\fR -value for that class based on various aspects of the class, as described in the Java Object Serialization Specification\&. However, it is strongly recommended that all serializable classes explicitly declare -\fBserialVersionUID\fR -values because the default process of computing -\fBserialVersionUID\fR -values is highly sensitive to class details that can vary depending on compiler implementations, and as a result, might cause unexpected -\fBInvalidClassExceptions\fR -during deserialization\&. To guarantee a consistent -\fBserialVersionUID\fR -value across different Java compiler implementations, a serializable class must declare an explicit -\fBserialVersionUID\fR -value\&. -.RE -.PP -static -.RS 4 -Warns about issues relating to the use of statics, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBclass XLintStatic {\fR -\fB static void m1() { }\fR -\fB void m2() { this\&.m1(); }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -The compiler generates the following warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBwarning: [static] static method should be qualified by type name, \fR -\fBXLintStatic, instead of by an expression\fR - -.fi -.if n \{\ -.RE -.\} -To resolve this issue, you can call the -\fBstatic\fR -method -\fBm1\fR -as follows: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBXLintStatic\&.m1();\fR - -.fi -.if n \{\ -.RE -.\} -Alternatively, you can remove the -\fBstatic\fR -keyword from the declaration of the method -\fBm1\fR\&. -.RE -.PP -try -.RS 4 -Warns about issues relating to use of -\fBtry\fR -blocks, including try\-with\-resources statements\&.
For example, a warning is generated for the following statement because the resource -\fBac\fR -declared in the -\fBtry\fR -block is not used: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBtry ( AutoCloseable ac = getResource() ) { /* do nothing */ }\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -unchecked -.RS 4 -Gives more detail for unchecked conversion warnings that are mandated by the Java Language Specification, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBList l = new ArrayList<Number>();\fR -\fBList<String> ls = l; // unchecked warning\fR - -.fi -.if n \{\ -.RE -.\} -During type erasure, the types -\fBArrayList<Number>\fR -and -\fBList<String>\fR -become -\fBArrayList\fR -and -\fBList\fR, respectively\&. -.sp -The variable -\fBls\fR -has the parameterized type -\fBList<String>\fR\&. When the -\fBList\fR -referenced by -\fBl\fR -is assigned to -\fBls\fR, the compiler generates an unchecked warning\&. At compile time, the compiler and JVM cannot determine whether -\fBl\fR -refers to a -\fBList<String>\fR -type\&. In this case, -\fBl\fR -does not refer to a -\fBList<String>\fR -type\&. As a result, heap pollution occurs\&. -.sp -A heap pollution situation occurs when the -\fBList\fR -object -\fBl\fR, whose static type is -\fBList<Number>\fR, is assigned to another -\fBList\fR -object, -\fBls\fR, that has a different static type, -\fBList<String>\fR\&. However, the compiler still allows this assignment\&. It must allow this assignment to preserve backward compatibility with releases of Java SE that do not support generics\&. Because of type erasure, -\fBList<Number>\fR -and -\fBList<String>\fR -both become -\fBList\fR\&. Consequently, the compiler allows the assignment of the object -\fBl\fR, which has a raw type of -\fBList\fR, to the object -\fBls\fR\&. -.RE -.PP -varargs -.RS 4 -Warns about unsafe usages of variable arguments (\fBvarargs\fR) methods, in particular, those that contain non\-reifiable arguments, for example: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic class ArrayBuilder {\fR -\fB public static <T> void addToList (List<T> listArg, T\&.\&.\&. elements) {\fR -\fB for (T x : elements) {\fR -\fB listArg\&.add(x);\fR -\fB }\fR -\fB }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -\fBNote:\fR -A non\-reifiable type is a type whose type information is not fully available at runtime\&. -.sp -The compiler generates the following warning for the definition of the method -\fBArrayBuilder\&.addToList\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBwarning: [varargs] Possible heap pollution from parameterized vararg type T\fR - -.fi -.if n \{\ -.RE -.\} -When the compiler encounters a varargs method, it translates the -\fBvarargs\fR -formal parameter into an array\&. However, the Java programming language does not permit the creation of arrays of parameterized types\&. In the method -\fBArrayBuilder\&.addToList\fR, the compiler translates the -\fBvarargs\fR -formal parameter -\fBT\&.\&.\&.\fR -elements to the formal parameter -\fBT[]\fR -elements, an array\&. However, because of type erasure, the compiler converts the -\fBvarargs\fR -formal parameter to -\fBObject[]\fR -elements\&. Consequently, there is a possibility of heap pollution\&. -.RE
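.PP
One possible way to address the \fBvarargs\fR warning, sketched here as an illustration rather than as part of the original example: when the method body is known to use the elements array safely, the \fB@SafeVarargs\fR annotation (available since JDK 7 on static and final methods) suppresses the warning at the declaration and at call sites\&.
.sp
.nf
\fBimport java\&.util\&.Collections;\fR
\fBimport java\&.util\&.List;\fR

\fBpublic class ArrayBuilder {\fR
\fB    // Asserts that the body performs no unsafe operations on elements\&.\fR
\fB    @SafeVarargs\fR
\fB    public static <T> void addToList(List<T> listArg, T\&.\&.\&. elements) {\fR
\fB        Collections\&.addAll(listArg, elements);\fR
\fB    }\fR
\fB}\fR
.fi
.sp
Alternatively, the whole category can be disabled for a compilation with \fB\-Xlint:\-varargs\fR, as described at the start of this section\&.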
-.SH "COMMAND-LINE ARGUMENT FILES" -.PP -To shorten or simplify the -\fBjavac\fR -command, you can specify one or more files that contain arguments to the -\fBjavac\fR -command (except -\fB\-J\fR -options)\&. This enables you to create -\fBjavac\fR -commands of any length on any operating system\&. -.PP -An argument file can include -\fBjavac\fR -options and source file names in any combination\&. The arguments within a file can be separated by spaces or newline characters\&. If a file name contains embedded spaces, then put the whole file name in double quotation marks\&. -.PP -File names within an argument file are relative to the current directory, not the location of the argument file\&. Wild cards (*) are not allowed in these lists (such as for specifying -\fB*\&.java\fR)\&. Use of the at sign (@) to recursively interpret files is not supported\&. The -\fB\-J\fR -options are not supported because they are passed to the launcher, which does not support argument files\&. -.PP -When executing the -\fBjavac\fR -command, pass in the path and name of each argument file with the at sign (@) leading character\&. When the -\fBjavac\fR -command encounters an argument beginning with the at sign (@), it expands the contents of that file into the argument list\&. -.PP -\fBExample 1\fR -.br -Single Argument File -.RS 4 -You could use a single argument file named -\fBargfile\fR -to hold all -\fBjavac\fR -arguments: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac @argfile\fR - -.fi -.if n \{\ -.RE -.\} -This argument file could contain the contents of both files shown in Example 2\&. -.RE -.PP -\fBExample 2\fR -.br -Two Argument Files -.RS 4 -You can create two argument files: one for the -\fBjavac\fR -options and the other for the source file names\&. Note that the following lists have no line\-continuation characters\&. -.sp -Create a file named options that contains the following: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB\-d classes\fR -.fi -.if n \{\ -.RE -.\} -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB\-g\fR -.fi -.if n \{\ -.RE -.\} -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB\-sourcepath /java/pubs/ws/1\&.3/src/share/classes\fR -.fi -.if n \{\ -.RE -.\} -.sp -.if n \{\ -.RS 4 -.\} -.nf - -.fi -.if n \{\ -.RE -.\} -Create a file named classes that contains the following: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBMyClass1\&.java\fR -\fBMyClass2\&.java\fR -\fBMyClass3\&.java\fR - -.fi -.if n \{\ -.RE -.\} -Then, run the -\fBjavac\fR -command as follows: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac @options @classes\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.PP -\fBExample 3\fR -.br -Argument Files with Paths -.RS 4 -The argument files can have paths, but any file names inside the files are relative to the current working directory (not -\fBpath1\fR -or -\fBpath2\fR): -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac @path1/options @path2/classes\fR - -.fi -.if n \{\ -.RE -.\} -.RE -.SH "ANNOTATION PROCESSING" -.PP -The -\fBjavac\fR -command provides direct support for annotation processing, superseding the need for the separate annotation processing command, -\fBapt\fR\&. -.PP -The API for annotation processors is defined in the -\fBjavax\&.annotation\&.processing\fR -and -\fBjavax\&.lang\&.model\fR -packages and subpackages\&. -.SS "How Annotation Processing Works" -.PP -Unless annotation processing is disabled with the -\fB\-proc:none\fR -option, the compiler searches for any annotation processors that are available\&. The search path can be specified with the -\fB\-processorpath\fR -option\&. If no path is specified, then the user class path is used\&. Processors are located by means of service provider\-configuration files named -\fBMETA\-INF/services/javax\&.annotation\&.processing\&.Processor\fR -on the search path\&. Such files should contain the names of any annotation processors to be used, listed one per line\&. Alternatively, processors can be specified explicitly, using the -\fB\-processor\fR -option\&.
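.PP
For instance (a sketch of the mechanism just described, reusing the \fBAnnoProc\fR processor from the \fBprocessing\fR example earlier in this page), the provider\-configuration file would be a plain\-text file named \fBMETA\-INF/services/javax\&.annotation\&.processing\&.Processor\fR containing the processor\*(Aqs fully qualified name on a line by itself:
.sp
.nf
\fBAnnoProc\fR
.fi
.sp
With that file on the processor search path, \fBjavac\fR discovers and runs the processor without the \fB\-processor\fR option\&.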
+The API for annotation processors is defined in the \f3javax\&.annotation\&.processing\fR and \f3javax\&.lang\&.model\fR packages and subpackages\&. +.SS HOW\ ANNOTATION\ PROCESSING\ WORKS +Unless annotation processing is disabled with the \f3-proc:none\fR option, the compiler searches for any annotation processors that are available\&. The search path can be specified with the \f3-processorpath\fR option\&. If no path is specified, then the user class path is used\&. Processors are located by means of service provider-configuration files named \f3META-INF/services/javax\&.annotation\&.processing\&.Processor\fR on the search path\&. Such files should contain the names of any annotation processors to be used, listed one per line\&. Alternatively, processors can be specified explicitly, using the \f3-processor\fR option\&. .PP After scanning the source files and classes on the command line to determine what annotations are present, the compiler queries the processors to determine what annotations they process\&. When a match is found, the processor is called\&. A processor can claim the annotations it processes, in which case no further attempt is made to find any processors for those annotations\&. After all of the annotations are claimed, the compiler does not search for additional processors\&. .PP If any processors generate new source files, then another round of annotation processing occurs: Any newly generated source files are scanned, and the annotations processed as before\&. Any processors called on previous rounds are also called on all subsequent rounds\&. This continues until no new source files are generated\&. .PP -After a round occurs where no new source files are generated, the annotation processors are called one last time, to give them a chance to complete any remaining work\&. Finally, unless the -\fB\-proc:only\fR -option is used, the compiler compiles the original and all generated source files\&. -.SS "Implicitly Loaded Source Files" -.PP -To compile a set of source files, the compiler might need to implicitly load additional source files\&. See Searching for Types\&. Such files are currently not subject to annotation processing\&. By default, the compiler gives a warning when annotation processing occurred and any implicitly loaded source files are compiled\&. The -\fB\-implicit\fR -option provides a way to suppress the warning\&. -.SH "SEARCHING FOR TYPES" -.PP +After a round occurs where no new source files are generated, the annotation processors are called one last time, to give them a chance to complete any remaining work\&. Finally, unless the \f3-proc:only\fR option is used, the compiler compiles the original and all generated source files\&. +.SS IMPLICITLY\ LOADED\ SOURCE\ FILES +To compile a set of source files, the compiler might need to implicitly load additional source files\&. See Searching for Types\&. Such files are currently not subject to annotation processing\&. By default, the compiler gives a warning when annotation processing occurred and any implicitly loaded source files are compiled\&. The \f3-implicit\fR option provides a way to suppress the warning\&. +.SH SEARCHING\ FOR\ TYPES To compile a source file, the compiler often needs information about a type, but the type definition is not in the source files specified on the command line\&. The compiler needs type information for every class or interface used, extended, or implemented in the source file\&.
This includes classes and interfaces not explicitly mentioned in the source file, but that provide information through inheritance\&. .PP -For example, when you create a subclass of -\fBjava\&.applet\&.Applet\fR, you are also using the ancestor classes of -\fBApplet\fR: -\fBjava\&.awt\&.Panel\fR, -\fBjava\&.awt\&.Container\fR, -\fBjava\&.awt\&.Component\fR, and -\fBjava\&.lang\&.Object\fR\&. +For example, when you create a subclass of \f3java\&.applet\&.Applet\fR, you are also using the ancestor classes of \f3Applet\fR: \f3java\&.awt\&.Panel\fR, \f3java\&.awt\&.Container\fR, \f3java\&.awt\&.Component\fR, and \f3java\&.lang\&.Object\fR\&. .PP -When the compiler needs type information, it searches for a source file or class file that defines the type\&. The compiler searches for class files first in the bootstrap and extension classes, then in the user class path (which by default is the current directory)\&. The user class path is defined by setting the -\fBCLASSPATH\fR -environment variable or by using the -\fB\-classpath\fR -option\&. +When the compiler needs type information, it searches for a source file or class file that defines the type\&. The compiler searches for class files first in the bootstrap and extension classes, then in the user class path (which by default is the current directory)\&. The user class path is defined by setting the \f3CLASSPATH\fR environment variable or by using the \f3-classpath\fR option\&. .PP -If you set the -\fB\-sourcepath\fR -option, then the compiler searches the indicated path for source files\&. Otherwise, the compiler searches the user class path for both class files and source files\&. +If you set the \f3-sourcepath\fR option, then the compiler searches the indicated path for source files\&. Otherwise, the compiler searches the user class path for both class files and source files\&. .PP -You can specify different bootstrap or extension classes with the -\fB\-bootclasspath\fR -and the -\fB\-extdirs\fR -options\&. See Cross\-Compilation Options\&. +You can specify different bootstrap or extension classes with the \f3-bootclasspath\fR and the \f3-extdirs\fR options\&. See Cross-Compilation Options\&. .PP -A successful type search may produce a class file, a source file, or both\&. If both are found, then you can use the -\fB\-Xprefer\fR -option to instruct the compiler which to use\&. If -\fBnewer\fR -is specified, then the compiler uses the newer of the two files\&. If -\fBsource\fR -is specified, the compiler uses the source file\&. The default is -\fBnewer\fR\&. +A successful type search may produce a class file, a source file, or both\&. If both are found, then you can use the \f3-Xprefer\fR option to instruct the compiler which to use\&. If \f3newer\fR is specified, then the compiler uses the newer of the two files\&. If \f3source\fR is specified, the compiler uses the source file\&. The default is \f3newer\fR\&. .PP -If a type search finds a source file for a required type, either by itself, or as a result of the setting for the -\fB\-Xprefer\fR -option, then the compiler reads the source file to get the information it needs\&. By default the compiler also compiles the source file\&. You can use the -\fB\-implicit\fR -option to specify the behavior\&. If -\fBnone\fR -is specified, then no class files are generated for the source file\&. If -\fBclass\fR -is specified, then class files are generated for the source file\&.
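.PP
A hypothetical command line combining these two options (the file name is illustrative only): read source files in preference to class files during the type search, and generate no class files for sources that are loaded implicitly:
.sp
.nf
\f3javac \-Xprefer:source \-implicit:none MyApp\&.java\fP
.fi
.sp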
+If a type search finds a source file for a required type, either by itself, or as a result of the setting for the \f3-Xprefer\fR option, then the compiler reads the source file to get the information it needs\&. By default the compiler also compiles the source file\&. You can use the \f3-implicit\fR option to specify the behavior\&. If \f3none\fR is specified, then no class files are generated for the source file\&. If \f3class\fR is specified, then class files are generated for the source file\&. .PP -The compiler might not discover the need for some type information until after annotation processing completes\&. When the type information is found in a source file and no -\fB\-implicit\fR -option is specified, the compiler gives a warning that the file is being compiled without being subject to annotation processing\&. To disable the warning, either specify the file on the command line (so that it will be subject to annotation processing) or use the -\fB\-implicit\fR -option to specify whether or not class files should be generated for such source files\&. -.SH "PROGRAMMATIC INTERFACE" +The compiler might not discover the need for some type information until after annotation processing completes\&. When the type information is found in a source file and no \f3-implicit\fR option is specified, the compiler gives a warning that the file is being compiled without being subject to annotation processing\&. To disable the warning, either specify the file on the command line (so that it will be subject to annotation processing) or use the \f3-implicit\fR option to specify whether or not class files should be generated for such source files\&. +.SH PROGRAMMATIC\ INTERFACE +The \f3javac\fR command supports the new Java Compiler API defined by the classes and interfaces in the \f3javax\&.tools\fR package\&. +.SS EXAMPLE +To compile as though providing command-line arguments, use the following syntax: +.sp +.nf +\f3JavaCompiler javac = ToolProvider\&.getSystemJavaCompiler();\fP +.fi +.nf +\f3\fP +.fi +.sp +The example writes diagnostics to the standard output stream and returns the exit code that \f3javac\fR would give when called from the command line\&. .PP -The -\fBjavac\fR -command supports the new Java Compiler API defined by the classes and interfaces in the -\fBjavax\&.tools\fR -package\&. -.SS "Example" +You can use other methods in the \f3javax\&.tools\&.JavaCompiler\fR interface to handle diagnostics, control where files are read from and written to, and more\&. +.SS OLD\ INTERFACE +\fINote:\fR This API is retained for backward compatibility only\&. All new code should use the newer Java Compiler API\&. .PP -To compile as though providing command\-line arguments, use the following syntax: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBJavaCompiler javac = ToolProvider\&.getSystemJavaCompiler();\fR - -.fi -.if n \{\ -.RE -.\} +The \f3com\&.sun\&.tools\&.javac\&.Main\fR class provides two static methods to call the compiler from a program: +.sp +.nf +\f3public static int compile(String[] args);\fP +.fi +.nf +\f3public static int compile(String[] args, PrintWriter out);\fP +.fi +.nf +\f3\fP +.fi +.sp +The \f3args\fR parameter represents any of the command-line arguments that would typically be passed to the compiler\&. .PP -The example writes diagnostics to the standard output stream and returns the exit code that -\fBjavac\fR -would give when called from the command line\&. +The \f3out\fR parameter indicates where the compiler diagnostic output is directed\&. 
.PP -You can use other methods in the -\fBjavax\&.tools\&.JavaCompiler\fR -interface to handle diagnostics, control where files are read from and written to, and more\&. -.SS "Old Interface" +The \f3return\fR value is equivalent to the \f3exit\fR value from \f3javac\fR\&. .PP -\fBNote:\fR -This API is retained for backward compatibility only\&. All new code should use the newer Java Compiler API\&. +\fINote:\fR All other classes and methods found in a package with names that start with \f3com\&.sun\&.tools\&.javac\fR (subpackages of \f3com\&.sun\&.tools\&.javac\fR) are strictly internal and subject to change at any time\&. +.SH EXAMPLES +\f3Example 1 Compile a Simple Program\fR .PP -The -\fBcom\&.sun\&.tools\&.javac\&.Main\fR -class provides two static methods to call the compiler from a program: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpublic static int compile(String[] args);\fR -\fBpublic static int compile(String[] args, PrintWriter out);\fR - -.fi -.if n \{\ -.RE -.\} +This example shows how to compile the \f3Hello\&.java\fR source file in the greetings directory\&. The class defined in \f3Hello\&.java\fR is called \f3greetings\&.Hello\fR\&. The greetings directory is the package directory both for the source file and the class file and is underneath the current directory\&. This makes it possible to use the default user class path\&. It also makes it unnecessary to specify a separate destination directory with the \f3-d\fR option\&. .PP -The -\fBargs\fR -parameter represents any of the command\-line arguments that would typically be passed to the compiler\&. +The source code in \f3Hello\&.java\fR: +.sp +.nf +\f3package greetings;\fP +.fi +.nf +\f3\fP +.fi +.nf +\f3public class Hello {\fP +.fi +.nf +\f3 public static void main(String[] args) {\fP +.fi +.nf +\f3 for (int i=0; i < args\&.length; i++) {\fP +.fi +.nf +\f3 System\&.out\&.println("Hello " + args[i]);\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3 }\fP +.fi +.nf +\f3}\fP +.fi +.nf +\f3\fP +.fi +.sp +Compile greetings\&.Hello: +.sp +.nf +\f3javac greetings/Hello\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +Run \f3greetings\&.Hello\fR: +.sp +.nf +\f3java greetings\&.Hello World Universe Everyone\fP +.fi +.nf +\f3Hello World\fP +.fi +.nf +\f3Hello Universe\fP +.fi +.nf +\f3Hello Everyone\fP +.fi +.nf +\f3\fP +.fi +.sp +\f3Example 2 Compile Multiple Source Files\fR .PP -The -\fBout\fR -parameter indicates where the compiler diagnostic output is directed\&. +This example compiles the \f3Aloha\&.java\fR, \f3GutenTag\&.java\fR, \f3Hello\&.java\fR, and \f3Hi\&.java\fR source files in the \f3greetings\fR package\&. +.sp +.nf +\f3% javac greetings/*\&.java\fP +.fi +.nf +\f3% ls greetings\fP +.fi +.nf +\f3Aloha\&.class GutenTag\&.class Hello\&.class Hi\&.class\fP +.fi +.nf +\f3Aloha\&.java GutenTag\&.java Hello\&.java Hi\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +\f3Example 3 Specify a User Class Path\fR .PP -The -\fBreturn\fR -value is equivalent to the -\fBexit\fR -value from -\fBjavac\fR\&. +After changing one of the source files in the previous example, recompile it: +.sp +.nf +\f3pwd\fP +.fi +.nf +\f3/examples\fP +.fi +.nf +\f3javac greetings/Hi\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +Because \f3greetings\&.Hi\fR refers to other classes in the \f3greetings\fR package, the compiler needs to find these other classes\&. The previous example works because the default user class path is the directory that contains the package directory\&. 
If you want to recompile this file without concern for which directory you are in, then add the examples directory to the user class path by setting \f3CLASSPATH\fR\&. This example uses the \f3-classpath\fR option\&. +.sp +.nf +\f3javac \-classpath /examples /examples/greetings/Hi\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +If you change \f3greetings\&.Hi\fR to use a banner utility, then that utility also needs to be accessible through the user class path\&. +.sp +.nf +\f3javac \-classpath /examples:/lib/Banners\&.jar \e\fP +.fi +.nf +\f3 /examples/greetings/Hi\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +To execute a class in the \f3greetings\fR package, the program needs access to the \f3greetings\fR package, and to the classes that the \f3greetings\fR classes use\&. +.sp +.nf +\f3java \-classpath /examples:/lib/Banners\&.jar greetings\&.Hi\fP +.fi +.nf +\f3\fP +.fi +.sp +\f3Example 4 Separate Source Files and Class Files\fR .PP -The following example uses \f3javac\fR to compile code that runs on JVM 1\&.7\&. +.sp +.nf +\f3javac \-source 1\&.7 \-target 1\&.7 \-bootclasspath jdk1\&.7\&.0/lib/rt\&.jar \e \fP +.fi +.nf +\f3\-extdirs "" OldCode\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +The \f3-source 1\&.7\fR option specifies that release 1\&.7 (or 7) of the Java programming language be used to compile \f3OldCode\&.java\fR\&. The \f3-target 1\&.7\fR option ensures that the generated class files are compatible with JVM 1\&.7\&. Note that in most cases, the value of the \f3-target\fR option is the value of the \f3-source\fR option; in this example, you can omit the \f3-target\fR option\&. .PP -\fBExample 1\fR -.br -Compile a Simple Program -.RS 4 -This example shows how to compile the -\fBHello\&.java\fR -source file in the greetings directory\&. The class defined in -\fBHello\&.java\fR -is called -\fBgreetings\&.Hello\fR\&. The greetings directory is the package directory both for the source file and the class file and is underneath the current directory\&. This makes it possible to use the default user class path\&. It also makes it unnecessary to specify a separate destination directory with the -\fB\-d\fR -option\&. -.sp -The source code in -\fBHello\&.java\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpackage greetings;\fR -\fB \fR -\fBpublic class Hello {\fR -\fB public static void main(String[] args) {\fR -\fB for (int i=0; i < args\&.length; i++) {\fR -\fB System\&.out\&.println("Hello " + args[i]);\fR -\fB }\fR -\fB }\fR -\fB}\fR - -.fi -.if n \{\ -.RE -.\} -Compile greetings\&.Hello: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac greetings/Hello\&.java\fR - -.fi -.if n \{\ -.RE -.\} -Run -\fBgreetings\&.Hello\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjava greetings\&.Hello World Universe Everyone\fR -\fBHello World\fR -\fBHello Universe\fR -\fBHello Everyone\fR - -.fi -.if n \{\ -.RE -.\} -.RE +You must specify the \f3-bootclasspath\fR option to specify the correct version of the bootstrap classes (the \f3rt\&.jar\fR library)\&.
If not, then the compiler generates a warning: +.sp +.nf +\f3javac \-source 1\&.7 OldCode\&.java\fP +.fi +.nf +\f3warning: [options] bootstrap class path not set in conjunction with \fP +.fi +.nf +\f3\-source 1\&.7\fP +.fi +.nf +\f3\fP +.fi +.sp +If you do not specify the correct version of bootstrap classes, then the compiler uses the old language rules (in this example, it uses version 1\&.7 of the Java programming language) combined with the new bootstrap classes, which can result in class files that do not work on the older platform (in this case, Java SE 7) because references to nonexistent methods can be included\&. .PP -\fBExample 2\fR -.br -Compile Multiple Source Files -.RS 4 -This example compiles the -\fBAloha\&.java\fR, -\fBGutenTag\&.java\fR, -\fBHello\&.java\fR, and -\fBHi\&.java\fR -source files in the -\fBgreetings\fR -package\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fB% javac greetings/*\&.java\fR -\fB% ls greetings\fR -\fBAloha\&.class GutenTag\&.class Hello\&.class Hi\&.class\fR -\fBAloha\&.java GutenTag\&.java Hello\&.java Hi\&.java\fR - -.fi -.if n \{\ -.RE -.\} -.RE +\f3Example 5 Cross Compile\fR .PP -\fBExample 3\fR -.br -Specify a User Class Path -.RS 4 -After changing one of the source files in the previous example, recompile it: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBpwd\fR -\fB/examples\fR -\fBjavac greetings/Hi\&.java\fR - -.fi -.if n \{\ -.RE -.\} -Because -\fBgreetings\&.Hi\fR -refers to other classes in the -\fBgreetings\fR -package, the compiler needs to find these other classes\&. The previous example works because the default user class path is the directory that contains the package directory\&. If you want to recompile this file without concern for which directory you are in, then add the examples directory to the user class path by setting -\fBCLASSPATH\fR\&. This example uses the -\fB\-classpath\fR -option\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-classpath /examples /examples/greetings/Hi\&.java\fR - -.fi -.if n \{\ -.RE -.\} -If you change -\fBgreetings\&.Hi\fR -to use a banner utility, then that utility also needs to be accessible through the user class path\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-classpath /examples:/lib/Banners\&.jar \e\fR -\fB /examples/greetings/Hi\&.java\fR - -.fi -.if n \{\ -.RE -.\} -To execute a class in the -\fBgreetings\fR -package, the program needs access to the -\fBgreetings\fR -package, and to the classes that the -\fBgreetings\fR -classes use\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjava \-classpath /examples:/lib/Banners\&.jar greetings\&.Hi\fR - -.fi -.if n \{\ -.RE -.\} -.RE +This example uses \f3javac\fR to compile code that runs on JVM 1\&.7\&. +.sp +.nf +\f3javac \-source 1\&.7 \-target 1\&.7 \-bootclasspath jdk1\&.7\&.0/lib/rt\&.jar \e\fP +.fi +.nf +\f3 \-extdirs "" OldCode\&.java\fP +.fi +.nf +\f3\fP +.fi +.sp +The \f3-source 1\&.7\fR option specifies that release 1\&.7 (or 7) of the Java programming language be used to compile \f3OldCode\&.java\fR\&. The \f3-target 1\&.7\fR option ensures that the generated class files are compatible with JVM 1\&.7\&. .PP -\fBExample 4\fR -.br -Separate Source Files and Class Files -.RS 4 -The following example uses -\fBjavac\fR -to compile code that runs on JVM 1\&.7\&.
-.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-source 1\&.7 \-target 1\&.7 \-bootclasspath jdk1\&.7\&.0/lib/rt\&.jar \e \fR -\fB\-extdirs "" OldCode\&.java\fR - -.fi -.if n \{\ -.RE -.\} -The -\fB\-source 1\&.7\fR -option specifies that release 1\&.7 (or 7) of the Java programming language be used to compile -\fBOldCode\&.java\fR\&. The -\fB\-target 1\&.7\fR -option ensures that the generated class files are compatible with JVM 1\&.7\&. Note that in most cases, the value of the -\fB\-target\fR -option is the value of the -\fB\-source\fR -option; in this example, you can omit the -\fB\-target\fR -option\&. -.sp -You must specify the -\fB\-bootclasspath\fR -option to specify the correct version of the bootstrap classes (the -\fBrt\&.jar\fR -library)\&. If not, then the compiler generates a warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-source 1\&.7 OldCode\&.java\fR -\fBwarning: [options] bootstrap class path not set in conjunction with \fR -\fB\-source 1\&.7\fR - -.fi -.if n \{\ -.RE -.\} -If you do not specify the correct version of bootstrap classes, then the compiler uses the old language rules (in this example, it uses version 1\&.7 of the Java programming language) combined with the new bootstrap classes, which can result in class files that do not work on the older platform (in this case, Java SE 7) because references to nonexistent methods can be included\&. -.RE -.PP -\fBExample 5\fR -.br -Cross Compile -.RS 4 -This example uses -\fBjavac\fR -to compile code that runs on JVM 1\&.7\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-source 1\&.7 \-target 1\&.7 \-bootclasspath jdk1\&.7\&.0/lib/rt\&.jar \e\fR -\fB \-extdirs "" OldCode\&.java\fR - -.fi -.if n \{\ -.RE -.\} -The -\fB\-source 1\&.7\fR -option specifies that release 1\&.7 (or 7) of the Java programming language be used to compile OldCode\&.java\&. The -\fB\-target 1\&.7\fR -option ensures that the generated class files are compatible with JVM 1\&.7\&. In most cases, the value of the -\fB\-target\fR -is the value of -\fB\-source\fR\&. In this example, the -\fB\-target\fR -option is omitted\&. -.sp -You must specify the -\fB\-bootclasspath\fR -option to specify the correct version of the bootstrap classes (the -\fBrt\&.jar\fR -library)\&. If not, then the compiler generates a warning: -.sp -.if n \{\ -.RS 4 -.\} -.nf -\fBjavac \-source 1\&.7 OldCode\&.java\fR -\fBwarning: [options] bootstrap class path not set in conjunction with \-source 1\&.7\fR - -.fi -.if n \{\ -.RE -.\} +You must specify the \f3-bootclasspath\fR option to specify the correct version of the bootstrap classes (the \f3rt\&.jar\fR library)\&. If not, then the compiler generates a warning: +.sp +.nf +\f3javac \-source 1\&.7 OldCode\&.java\fP +.fi +.nf +\f3warning: [options] bootstrap class path not set in conjunction with \-source 1\&.7\fP +.fi +.nf +\f3\fP +.fi +.sp If you do not specify the correct version of bootstrap classes, then the compiler uses the old language rules combined with the new bootstrap classes\&. This combination can result in class files that do not work on the older platform (in this case, Java SE 7) because references to nonexistent methods can be included\&. In this example, the compiler uses release 1\&.7 of the Java programming language\&.
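.PP
As a programmatic counterpart to these command\-line examples, the following minimal sketch (the class name \f3CompileHello\fR is illustrative, not part of the original examples) uses the Java Compiler API described in Programmatic Interface to compile \f3greetings/Hello\&.java\fR from Example 1\&. With \f3null\fR stream arguments, \f3run\fR writes diagnostics to the standard streams and returns the same exit code that the \f3javac\fR command would give:
.sp
.nf
\f3import javax\&.tools\&.JavaCompiler;\fP
\f3import javax\&.tools\&.ToolProvider;\fP

\f3public class CompileHello {\fP
\f3    public static void main(String[] args) {\fP
\f3        JavaCompiler javac = ToolProvider\&.getSystemJavaCompiler();\fP
\f3        // Null in/out/err arguments select System\&.in, System\&.out, System\&.err\&.\fP
\f3        int rc = javac\&.run(null, null, null, "greetings/Hello\&.java");\fP
\f3        System\&.exit(rc);\fP
\f3    }\fP
\f3}\fP
.fi
.sp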
-.RE -.SH "SEE ALSO" -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.SH SEE\ ALSO +.TP 0.2i +\(bu java(1) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.TP 0.2i +\(bu jdb(1) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.TP 0.2i +\(bu javah(1) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.TP 0.2i +\(bu javadoc(1) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.TP 0.2i +\(bu jar(1) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} +.TP 0.2i +\(bu jdb(1) -.RE -.br -'pl 8.5i -'bp +.RE +.br +'pl 8.5i +'bp --- ./jdk/src/bsd/doc/man/javadoc.1 Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/src/bsd/doc/man/javadoc.1 Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ '\" t -.\" Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. +.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved. .\" .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. .\" @@ -23,12 +23,12 @@ .\" .\" Arch: generic .\" Software: JDK 8 -.\" Date: 10 May 2011 +.\" Date: 03 March 2015 .\" SectDesc: Basic Tools .\" Title: javadoc.1 .\" .if n .pl 99999 -.TH javadoc 1 "10 May 2011" "JDK 8" "Basic Tools" +.TH javadoc 1 "03 March 2015" "JDK 8" "Basic Tools" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -209,7 +209,7 @@ \f3package java\&.lang\&.applet;\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -251,7 +251,7 @@ \f3initialize, start, and stop the applet\&. \fP .fi .nf -\f3\fR +\f3\fP .fi .nf \f3@since 1\&.0 \fP @@ -266,7 +266,7 @@ \f3\fP .fi .nf -\f3\fR +\f3\fP .fi .sp The \f3package\&.html\fR file is a typical HTML file and does not include a package declaration\&. The content of the package comment file is written in HTML with one exception\&. The documentation comment should not include the comment separators \f3/**\fR and \f3*/\fR or leading asterisks\&. When writing the comment, make the first sentence a summary about the package, and do not put a title or any other text between the \f3\fR tag and the first sentence\&. You can include package tags\&. All block tags must appear after the main description\&. If you add an \f3@see\fR tag in a package comment file, then it must have a fully qualified name\&. @@ -334,7 +334,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp .SS TEST\ AND\ TEMPLATE\ FILES @@ -350,7 +350,7 @@ \f3com/package1/test\-files/\fP .fi .nf -\f3\fR +\f3\fP .fi .sp If your test files contain documentation comments, then you can set up a separate run of the \f3javadoc\fR command to produce test file documentation by passing in their test source file names with wild cards, such as \f3com/package1/test-files/*\&.java\fR\&. @@ -560,7 +560,7 @@ \f3implements Serializable\fP .fi .nf -\f3\fR +\f3\fP .fi .sp The declaration for the \f3Boolean\&.valueOf\fR method is: @@ -569,7 +569,7 @@ \f3public static Boolean valueOf(String s)\fP .fi .nf -\f3\fR +\f3\fP .fi .sp The \f3javadoc\fR command can include the modifiers \f3public\fR, \f3protected\fR, \f3private\fR, \f3abstract\fR, \f3final\fR, \f3static\fR, \f3transient\fR, and \f3volatile\fR, but not \f3synchronized\fR or \f3native\fR\&. 
The \f3synchronized\fR and \f3native\fR modifiers are considered implementation detail and not part of the API specification\&. @@ -593,7 +593,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp To save space you can put a comment on one line: @@ -602,7 +602,7 @@ \f3/** This comment takes up only one line\&. */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -623,19 +623,19 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .nf \f3import com\&.example; // MISTAKE \- Important not to put import statement here\fP .fi .nf -\f3\fR +\f3\fP .fi .nf \f3public class Whatever{ }\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -657,7 +657,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -676,7 +676,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -700,7 +700,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -730,7 +730,7 @@ \f3public int x, y; // Avoid this \fP .fi .nf -\f3\fR +\f3\fP .fi .sp The \f3javadoc\fR command generates the following documentation from the previous code: @@ -739,7 +739,7 @@ \f3public int x\fP .fi .nf -\f3\fR +\f3\fP .fi .sp The horizontal and vertical distances of point (x, y)\&. @@ -748,7 +748,7 @@ \f3public int y\fP .fi .nf -\f3\fR +\f3\fP .fi .sp The horizontal and vertical distances of point (x, y)\&. @@ -872,7 +872,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -899,11 +899,10 @@ .TP 0.2i \(bu In the text arguments of the \f3@return\fR, \f3@param,\fR and \f3@throws\fR tags of a method\&. In this case, the tag text is copied from the corresponding tag up the hierarchy\&. -.RE -.RS +.RE + + See Method Comment Inheritance for a description of how comments are found in the inheritance hierarchy\&. Note that if this tag is missing, then the comment is or is not automatically inherited according to rules described in that section\&. - -.RE .TP {@link \fIpackage\&.class#member label\fR} Introduced in JDK 1\&.2 @@ -920,7 +919,7 @@ \f3Use the {@link #getComponentAt(int, int) getComponentAt} method\&.\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -931,7 +930,7 @@ \f3Use the getComponentAt method\&.\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -942,7 +941,7 @@ \f3Use the getComponentAt method\&.\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -982,7 +981,7 @@ \f3}\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -1014,7 +1013,7 @@ \f3}\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -1071,7 +1070,7 @@ \f3 */\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -1091,7 +1090,7 @@ \f3\fP .fi .nf -\f3\fR +\f3\fP .fi .sp @@ -1128,7 +1127,7 @@ \f3@see #constructor(Type argname, Type argname,\&.\&.\&.) \fP .fi .nf -\f3\fR +\f3\fP .fi .nf \f3\fIReferencing another class in the current or imported packages\fR\fP @@ -1155,7 +1154,7 @@ \f3@see Class \fP .fi .nf -\f3\fR +\f3\fP .fi .nf \f3\fIReferencing an element in another package (fully qualified)\fR\fP @@ -1185,7 +1184,7 @@ \f3@see package\fP .fi .nf -\f3\fR +\f3\fP .fi .sp \f3\fRNotes about the previous listing: @@ -1215,7 +1214,7 @@ Any enclosing classes and interfaces searching the closest first\&. .TP 0.4i 3\&. -Any superclasses and superonterfaces, searching the closest first\&. +Any superclasses and superinterfaces, searching the closest first\&. .TP 0.4i 4\&. The current package\&. @@ -1307,7 +1306,7 @@ \f3@see "The Java Programming Language" // "The Java Programming Language" \fP .fi .nf -\f3\fR +\f3\fP .fi .sp \fINote:\fR You can extend the \f3@se\fR\f3e\fR tag to link to classes not being documented with the \f3-link\fR option\&. @@ -1317,7 +1316,7 @@ Used in the documentation comment for a default serializable field\&. 
See Documenting Serializable Fields and Data for a Class at http://docs\&.oracle\&.com/javase/8/docs/platform/serialization/spec/serial-arch\&.html#5251 -See also Oracle\(cqs Criteria for Including Classes in the Serialilzed Form Specification at http://www\&.oracle\&.com/technetwork/java/javase/documentation/serialized-criteria-137781\&.html +See also Oracle\(cqs Criteria for Including Classes in the Serialized Form Specification at http://www\&.oracle\&.com/technetwork/java/javase/documentation/serialized-criteria-137781\&.html An optional \f3field-description\fR should explain the meaning of the field and list the acceptable values\&. When needed, the description can span multiple lines\&. The standard doclet adds this information to the serialized form page\&. See Cross-Reference Pages\&. @@ -1331,13 +1330,12 @@ .TP 0.2i \(bu A private or package-private class that implements \f3Serializable\fR is excluded unless that class (or its package) is marked with the \f3@serial include\fR tag\&. -.RE -.RS +.RE + + For example, the \f3javax\&.swing\fR package is marked with the \f3@serial\fR\f3exclude\fR tag in package\&.html or package-info\&.java\&. The public class \f3java\&.security\&.BasicPermission\fR is marked with the \f3@serial exclude\fR tag\&. The package-private class \f3java\&.util\&.PropertyPermissionCollection\fR is marked with the \f3@serial include\fR tag\&. The \f3@serial\fR tag at the class level overrides the \f3@serial\fR tag at the package level\&. - -.RE .TP @serialData \fIdata-description\fR Introduced in JDK 1\&.2 @@ -1387,7 +1385,7 @@ \f3public static final String SCRIPT_START = " + + + + + + + + + + +
Start First Applet
Start Second Applet
+ + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/Focus/SortingFPT/JDK8048887.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + @test + @bug 8048887 + @summary Tests SortingFTP for an exception caused by the tim-sort algo. + @author anton.tarasov: area=awt.focus + @run main JDK8048887 +*/ + +import javax.swing.JFrame; +import javax.swing.JPanel; +import javax.swing.SwingUtilities; +import java.awt.Dimension; +import java.awt.Color; +import java.awt.GridBagLayout; +import java.awt.GridBagConstraints; +import java.awt.event.WindowAdapter; +import java.awt.event.WindowEvent; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +public class JDK8048887 { + + static volatile boolean passed = true; + + public static void main(String[] args) { + JDK8048887 app = new JDK8048887(); + app.start(); + } + + public void start() { + final CountDownLatch latch = new CountDownLatch(1); + + SwingUtilities.invokeLater(() -> { + // Catch the original exception which sounds like: + // java.lang.IllegalArgumentException: Comparison method violates its general contract! + Thread.currentThread().setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { + public void uncaughtException(Thread t, Throwable e) { + e.printStackTrace(); + if (e instanceof IllegalArgumentException) { + passed = false; + latch.countDown(); + } + } + }); + + TestDialog d = new TestDialog(); + // It's expected that the dialog is focused on start. + // The listener is called after the FTP completes processing and the bug is reproduced or not. + d.addWindowFocusListener(new WindowAdapter() { + public void windowGainedFocus(WindowEvent e) { + latch.countDown(); + } + }); + d.setVisible(true); + }); + + try { + latch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + if (passed) + System.out.println("Test passed."); + else + throw new RuntimeException("Test failed!"); + } +} + +class TestDialog extends JFrame { + + // The layout of the components reproduces the transitivity issue + // with SortingFocusTraversalPolicy relying on the tim-sort algo. 
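+ // Note: java.util.Arrays.sort uses TimSort, which throws
+ // IllegalArgumentException("Comparison method violates its general contract!")
+ // when it detects a comparator that is not transitive; the coordinates below
+ // are chosen so that the SortingFocusTraversalPolicy comparator trips that
+ // check, which is what the uncaughtException handler above watches for.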
+ + private static int[] Xs = new int[] {71, 23, 62, 4, 79, 39, 34, 9, 84, 58, 30, 34, 38, 15, 69, 10, 44, 95, 70, 54, + 44, 62, 77, 64, 70, 83, 31, 48, 96, 54, 40, 3, 60, 58, 3, 20, 94, 54, 26, 19, 48, 47, 12, 70, 86, 43, 71, 97, 19, + 69, 90, 22, 43, 76, 10, 60, 29, 49, 9, 9, 15, 73, 85, 80, 81, 35, 87, 43, 17, 57, 38, 44, 29, 86, 96, 15, 57, 26, + 27, 78, 26, 87, 43, 6, 4, 16, 57, 99, 32, 86, 96, 5, 50, 69, 12, 4, 36, 84, 71, 60, 22, 46, 11, 44, 87, 3, 23, 14, + 43, 25, 32, 44, 11, 18, 77, 2, 51, 87, 88, 53, 69, 37, 14, 10, 25, 73, 39, 33, 91, 51, 96, 9, 74, 66, 70, 42, 72, + 7, 82, 40, 91, 33, 83, 54, 33, 50, 83, 1, 81, 32, 66, 11, 75, 56, 53, 45, 1, 69, 46, 31, 79, 58, 12, 20, 92, 49, + 50, 90, 33, 8, 43, 93, 72, 78, 9, 56, 84, 60, 30, 39, 33, 88, 84, 56, 49, 47, 4, 90, 57, 6, 23, 96, 37, 88, 22, 79, + 35, 80, 45, 55}; + + public TestDialog() { + JPanel panel = new JPanel(new GridBagLayout()); + GridBagConstraints gbc = new GridBagConstraints(); + for (int i=0; i < Xs.length; i++) { + gbc.gridx = Xs[i]; + gbc.gridy = 100 - gbc.gridx; + panel.add(new MyComponent(), gbc); + } + getRootPane().getContentPane().add(panel); + pack(); + } + + public static class MyComponent extends JPanel { + private final static Dimension SIZE = new Dimension(1,1); + + public MyComponent() { + setBackground(Color.BLACK); + setOpaque(true); + } + + @Override + public Dimension getPreferredSize() { + return SIZE; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/Frame/DisposeStressTest/DisposeStressTest.html Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,21 @@ + + + +DisposeStressTest + + + +

DisposeStressTest
Bug ID: 4051487, 4145670

+ +

This is an AUTOMATIC test, simply wait for completion

+ + + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/Frame/DisposeStressTest/DisposeStressTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +/* + test + @bug 4051487 4145670 8062021 + @summary Tests that disposing of an empty Frame or a Frame with a MenuBar + while it is being created does not crash the VM. + @author dpm area=Threads + @run applet/timeout=7200 DisposeStressTest.html +*/ + +// Note there is no @ in front of test above. This is so that the +// harness will not mistake this file as a test file. It should +// only see the html file as a test file. (the harness runs all +// valid test files, so it would run this test twice if this file +// were valid as well as the html file.) +// Also, note the area= after Your Name in the author tag. Here, you +// should put which functional area the test falls in. See the +// AWT-core home page -> test areas and/or -> AWT team for a list of +// areas. +// Note also the 'DisposeStressTest.html' in the run tag. This should +// be changed to the name of the test. + + +/** + * DisposeStressTest.java + * + * summary: + */ + +import java.applet.Applet; +import java.awt.*; + + +//Automated tests should run as applet tests if possible because they +// get their environments cleaned up, including AWT threads, any +// test created threads, and any system resources used by the test +// such as file descriptors. (This is normally not a problem as +// main tests usually run in a separate VM, however on some platforms +// such as the Mac, separate VMs are not possible and non-applet +// tests will cause problems). Also, you don't have to worry about +// synchronisation stuff in Applet tests they way you do in main +// tests... + + +public class DisposeStressTest extends Applet + { + //Declare things used in the test, like buttons and labels here + + public void init() + { + //Create instructions for the user here, as well as set up + // the environment -- set the layout manager, add buttons, + // etc. 
+ + this.setLayout (new BorderLayout ()); + + String[] instructions = + { + "This is an AUTOMATIC test", + "simply wait until it is done" + }; + Sysout.createDialog( ); + Sysout.printInstructions( instructions ); + + }//End init() + + public void start () + { + for (int i = 0; i < 1000; i++) { + Frame f = new Frame(); + f.setBounds(10, 10, 10, 10); + f.show(); + f.dispose(); + + Frame f2 = new Frame(); + f2.setBounds(10, 10, 100, 100); + MenuBar bar = new MenuBar(); + Menu menu = new Menu(); + menu.add(new MenuItem("foo")); + bar.add(menu); + f2.setMenuBar(bar); + f2.show(); + f2.dispose(); + } + }// start() + + }// class DisposeStressTest + + +/**************************************************** + Standard Test Machinery + DO NOT modify anything below -- it's a standard + chunk of code whose purpose is to make user + interaction uniform, and thereby make it simpler + to read and understand someone else's test. + ****************************************************/ + +/** + This is part of the standard test machinery. + It creates a dialog (with the instructions), and is the interface + for sending text messages to the user. + To print the instructions, send an array of strings to Sysout.createDialog + WithInstructions method. Put one line of instructions per array entry. + To display a message for the tester to see, simply call Sysout.println + with the string to be displayed. + This mimics System.out.println but works within the test harness as well + as standalone. + */ + +class Sysout + { + private static TestDialog dialog; + + public static void createDialogWithInstructions( String[] instructions ) + { + dialog = new TestDialog( new Frame(), "Instructions" ); + dialog.printInstructions( instructions ); + dialog.show(); + println( "Any messages for the tester will display here." ); + } + + public static void createDialog( ) + { + dialog = new TestDialog( new Frame(), "Instructions" ); + String[] defInstr = { "Instructions will appear here. ", "" } ; + dialog.printInstructions( defInstr ); + dialog.show(); + println( "Any messages for the tester will display here." ); + } + + + public static void printInstructions( String[] instructions ) + { + dialog.printInstructions( instructions ); + } + + + public static void println( String messageIn ) + { + dialog.displayMessage( messageIn ); + } + + }// Sysout class + +/** + This is part of the standard test machinery. It provides a place for the + test instructions to be displayed, and a place for interactive messages + to the user to be displayed. + To have the test instructions displayed, see Sysout. + To have a message to the user be displayed, see Sysout. + Do not call anything in this dialog directly. 
+ */ +class TestDialog extends Dialog + { + + TextArea instructionsText; + TextArea messageText; + int maxStringLength = 80; + + //DO NOT call this directly, go through Sysout + public TestDialog( Frame frame, String name ) + { + super( frame, name ); + int scrollBoth = TextArea.SCROLLBARS_BOTH; + instructionsText = new TextArea( "", 15, maxStringLength, scrollBoth ); + add( "North", instructionsText ); + + messageText = new TextArea( "", 5, maxStringLength, scrollBoth ); + add("South", messageText); + + pack(); + + show(); + }// TestDialog() + + //DO NOT call this directly, go through Sysout + public void printInstructions( String[] instructions ) + { + //Clear out any current instructions + instructionsText.setText( "" ); + + //Go down array of instruction strings + + String printStr, remainingStr; + for( int i=0; i < instructions.length; i++ ) + { + //chop up each into pieces maxSringLength long + remainingStr = instructions[ i ]; + while( remainingStr.length() > 0 ) + { + //if longer than max then chop off first max chars to print + if( remainingStr.length() >= maxStringLength ) + { + //Try to chop on a word boundary + int posOfSpace = remainingStr. + lastIndexOf( ' ', maxStringLength - 1 ); + + if( posOfSpace <= 0 ) posOfSpace = maxStringLength - 1; + + printStr = remainingStr.substring( 0, posOfSpace + 1 ); + remainingStr = remainingStr.substring( posOfSpace + 1 ); + } + //else just print + else + { + printStr = remainingStr; + remainingStr = ""; + } + + instructionsText.append( printStr + "\n" ); + + }// while + + }// for + + }//printInstructions() + + //DO NOT call this directly, go through Sysout + public void displayMessage( String messageIn ) + { + messageText.append( messageIn + "\n" ); + } + + }// TestDialog class --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/Graphics2D/DrawString/DrawStringCrash.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8043508 + * @summary Drawing a very long string crashes VM + */ + +import java.awt.*; +import java.awt.image.*; + +public class DrawStringCrash { + + public static void main(String[] args) { + StringBuffer sb = new StringBuffer(); + String s = "abcdefghijklmnopqrstuzwxyz"; + for (int x = 0; x < 100000 ; x++) { + sb.append(s); + } + // Now have a string which uses approx 5Mb memory + // Loop again drawing doubling each time until + // we reach 8 billion chars or get OOME which means we can't + // go any further. 
+        // Often there is no crash because the Java OOM happens
+        // long before the native heap runs out.
+        long maxLen = 8L * 1024 * 1024 * 1024;
+        int len = sb.length();
+
+        BufferedImage bi =
+            new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB);
+        Graphics2D g2d = bi.createGraphics();
+        while (len < maxLen) {
+            try {
+                g2d.drawString(sb.toString(), 20, 20);
+            } catch (OutOfMemoryError e) {
+                return;
+            }
+            sb.append(sb);
+            len *= 2;
+        }
+        return;
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/awt/Graphics2D/WhiteTextColorTest.java Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.awt.*;
+import java.awt.image.*;
+import javax.swing.*;
+
+/**
+ * @test
+ * @bug 8056009
+ * @summary tests whether Graphics.setColor calls with Color.white are ignored
+ *          directly after pipeline initialization for a certain set of operations.
+ * @author ceisserer
+ */
+public class WhiteTextColorTest extends Frame {
+    public static volatile boolean success = false;
+
+    public WhiteTextColorTest() {
+        Image dstImg = getGraphicsConfiguration()
+                .createCompatibleVolatileImage(30, 20);
+        Graphics g = dstImg.getGraphics();
+
+        g.setColor(Color.BLACK);
+        g.fillRect(0, 0, dstImg.getWidth(null), dstImg.getHeight(null));
+        g.setColor(Color.WHITE);
+        g.drawString("Test", 0, 15);
+
+        BufferedImage readBackImg = new BufferedImage(dstImg.getWidth(null),
+                dstImg.getHeight(null), BufferedImage.TYPE_INT_RGB);
+        readBackImg.getGraphics().drawImage(dstImg, 0, 0, null);
+
+        for (int x = 0; x < readBackImg.getWidth(); x++) {
+            for (int y = 0; y < readBackImg.getHeight(); y++) {
+                int pixel = readBackImg.getRGB(x, y);
+
+                // If a single white pixel is found, the preceding
+                // setColor(Color.WHITE) call was not ignored and
+                // the bug is not present
+                if (pixel == 0xFFFFFFFF) {
+                    return;
+                }
+            }
+        }
+
+        throw new RuntimeException("Test Failed");
+    }
+
+    public static void main(String[] args) throws Exception {
+        SwingUtilities.invokeLater(new Runnable() {
+            public void run() {
+                new WhiteTextColorTest();
+            }
+        });
+    }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/awt/SplashScreen/MultiResolutionSplash/MultiResolutionSplashTest.java Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Color; +import java.awt.Dialog; +import java.awt.Graphics; +import java.awt.Graphics2D; +import java.awt.Panel; +import java.awt.Rectangle; +import java.awt.Robot; +import java.awt.SplashScreen; +import java.awt.Window; +import java.awt.image.BufferedImage; +import java.io.File; +import javax.imageio.ImageIO; +import sun.java2d.SunGraphics2D; + +/** + * @test + * @bug 8043869 + * @author Alexander Scherbatiy + * @summary [macosx] java -splash does not honor 2x hi dpi notation for retina + * support + * @run main MultiResolutionSplashTest GENERATE_IMAGES + * @run main/othervm -splash:splash1.png MultiResolutionSplashTest TEST_SPLASH 0 + * @run main/othervm -splash:splash2 MultiResolutionSplashTest TEST_SPLASH 1 + * @run main/othervm -splash:splash3. MultiResolutionSplashTest TEST_SPLASH 2 + */ +public class MultiResolutionSplashTest { + + private static final int IMAGE_WIDTH = 300; + private static final int IMAGE_HEIGHT = 200; + + private static final ImageInfo[] tests = { + new ImageInfo("splash1.png", "splash1@2x.png", Color.BLUE, Color.GREEN), + new ImageInfo("splash2", "splash2@2x", Color.WHITE, Color.BLACK), + new ImageInfo("splash3.", "splash3@2x.", Color.YELLOW, Color.RED) + }; + + public static void main(String[] args) throws Exception { + + String test = args[0]; + + switch (test) { + case "GENERATE_IMAGES": + generateImages(); + break; + case "TEST_SPLASH": + int index = Integer.parseInt(args[1]); + testSplash(tests[index]); + break; + default: + throw new RuntimeException("Unknown test: " + test); + } + } + + static void testSplash(ImageInfo test) throws Exception { + SplashScreen splashScreen = SplashScreen.getSplashScreen(); + + if (splashScreen == null) { + throw new RuntimeException("Splash screen is not shown!"); + } + + Graphics2D g = splashScreen.createGraphics(); + Rectangle splashBounds = splashScreen.getBounds(); + int screenX = (int) splashBounds.getCenterX(); + int screenY = (int) splashBounds.getCenterY(); + + Robot robot = new Robot(); + Color splashScreenColor = robot.getPixelColor(screenX, screenY); + + float scaleFactor = getScaleFactor(); + Color testColor = (1 < scaleFactor) ? 
test.color2x : test.color1x; + + if (!testColor.equals(splashScreenColor)) { + throw new RuntimeException( + "Image with wrong resolution is used for splash screen!"); + } + } + + static float getScaleFactor() { + + final Dialog dialog = new Dialog((Window) null); + dialog.setSize(100, 100); + dialog.setModal(true); + final float[] scaleFactors = new float[1]; + Panel panel = new Panel() { + + @Override + public void paint(Graphics g) { + float scaleFactor = 1; + if (g instanceof SunGraphics2D) { + scaleFactor = ((SunGraphics2D) g).surfaceData.getDefaultScale(); + } + scaleFactors[0] = scaleFactor; + dialog.setVisible(false); + } + }; + + dialog.add(panel); + dialog.setVisible(true); + dialog.dispose(); + + return scaleFactors[0]; + } + + static void generateImages() throws Exception { + for (ImageInfo test : tests) { + generateImage(test.name1x, test.color1x, 1); + generateImage(test.name2x, test.color2x, 2); + } + } + + static void generateImage(String name, Color color, int scale) throws Exception { + File file = new File(name); + if (file.exists()) { + return; + } + BufferedImage image = new BufferedImage(scale * IMAGE_WIDTH, scale * IMAGE_HEIGHT, + BufferedImage.TYPE_INT_RGB); + Graphics g = image.getGraphics(); + g.setColor(color); + g.fillRect(0, 0, scale * IMAGE_WIDTH, scale * IMAGE_HEIGHT); + ImageIO.write(image, "png", file); + } + + static class ImageInfo { + + final String name1x; + final String name2x; + final Color color1x; + final Color color2x; + + public ImageInfo(String name1x, String name2x, Color color1x, Color color2x) { + this.name1x = name1x; + this.name2x = name2x; + this.color1x = color1x; + this.color2x = color2x; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/Window/AlwaysOnTop/SyncAlwaysOnTopFieldTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.awt.Window; + +/** + * @test + * @bug 8064468 + * @author Alexander Scherbatiy + * @summary ownedWindowList access requires synchronization in + * Window.setAlwaysOnTop() method + * @run main SyncAlwaysOnTopFieldTest + */ +public class SyncAlwaysOnTopFieldTest { + + private static final int WINDOWS_COUNT = 200; + private static final int STEPS_COUNT = 20; + + public static void main(String[] args) throws Exception { + final Window rootWindow = createWindow(null); + + new Thread(() -> { + for (int i = 0; i < WINDOWS_COUNT; i++) { + createWindow(rootWindow); + } + }).start(); + + boolean alwaysOnTop = true; + for (int i = 0; i < STEPS_COUNT; i++) { + Thread.sleep(10); + rootWindow.setAlwaysOnTop(alwaysOnTop); + alwaysOnTop = !alwaysOnTop; + } + } + + private static Window createWindow(Window parent) { + Window window = new Window(parent); + window.setSize(200, 200); + window.setVisible(true); + return window; + } +} \ No newline at end of file --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/datatransfer/MappingGenerationTest/MappingGenerationTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.SystemFlavorMap; +import java.util.List; + +/* + @test + @bug 4512530 8027148 + @summary tests that mappings for text flavors are generated properly + @author das@sparc.spb.su area=datatransfer +*/ + +public class MappingGenerationTest { + + private static final SystemFlavorMap fm = + (SystemFlavorMap)SystemFlavorMap.getDefaultFlavorMap(); + + public static void main(String[] args) { + test1(); + test2(); + test3(); + test4(); + test5(); + test6(); + } + + /** + * Verifies that Lists returned from getNativesForFlavor() and + * getFlavorsForNative() are not modified with a subsequent call + * to addUnencodedNativeForFlavor() and addFlavorForUnencodedNative() + * respectively. 
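+     * For example (a sketch, using the identifiers of the test body below):
+     *   List natives = fm.getNativesForFlavor(df);   // snapshot
+     *   fm.addUnencodedNativeForFlavor(df, nat);
+     * the earlier snapshot must not pick up the new mapping; only a fresh
+     * getNativesForFlavor(df) call reflects it.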
+ */ + public static void test1() { + DataFlavor df = new DataFlavor("text/plain-test1", null); + String nat = "native1"; + + List natives = fm.getNativesForFlavor(df); + fm.addUnencodedNativeForFlavor(df, nat); + List nativesNew = fm.getNativesForFlavor(df); + if (natives.equals(nativesNew)) { + System.err.println("orig=" + natives); + System.err.println("new=" + nativesNew); + throw new RuntimeException("Test failed"); + } + + List flavors = fm.getFlavorsForNative(nat); + fm.addFlavorForUnencodedNative(nat, df); + List flavorsNew = fm.getFlavorsForNative(nat); + if (flavors.equals(flavorsNew)) { + System.err.println("orig=" + flavors); + System.err.println("new=" + flavorsNew); + throw new RuntimeException("Test failed"); + } + } + + /** + * Verifies that SystemFlavorMap is not affected by modification of + * the Lists returned from getNativesForFlavor() and + * getFlavorsForNative(). + */ + public static void test2() { + DataFlavor df = new DataFlavor("text/plain-test2", null); + String nat = "native2"; + DataFlavor extraDf = new DataFlavor("text/test", null); + + List natives = fm.getNativesForFlavor(df); + natives.add("Should not be here"); + java.util.List nativesNew = fm.getNativesForFlavor(df); + if (natives.equals(nativesNew)) { + System.err.println("orig=" + natives); + System.err.println("new=" + nativesNew); + throw new RuntimeException("Test failed"); + } + + List flavors = fm.getFlavorsForNative(nat); + flavors.add(extraDf); + java.util.List flavorsNew = fm.getFlavorsForNative(nat); + if (flavors.equals(flavorsNew)) { + System.err.println("orig=" + flavors); + System.err.println("new=" + flavorsNew); + throw new RuntimeException("Test failed"); + } + } + + /** + * Verifies that addUnencodedNativeForFlavor() for a particular text flavor + * doesn't affect mappings for other flavors. + */ + public static void test3() { + DataFlavor df1 = new DataFlavor("text/plain-test3", null); + DataFlavor df2 = new DataFlavor("text/plain-test3; charset=Unicode; class=java.io.Reader", null); + String nat = "native3"; + List natives = fm.getNativesForFlavor(df2); + fm.addUnencodedNativeForFlavor(df1, nat); + List nativesNew = fm.getNativesForFlavor(df2); + if (!natives.equals(nativesNew)) { + System.err.println("orig=" + natives); + System.err.println("new=" + nativesNew); + throw new RuntimeException("Test failed"); + } + } + + /** + * Verifies that addUnencodedNativeForFlavor() really adds the specified + * flavor-to-native mapping to the existing mappings. + */ + public static void test4() { + DataFlavor df = new DataFlavor("text/plain-test4; charset=Unicode; class=java.io.Reader", null); + String nat = "native4"; + List natives = fm.getNativesForFlavor(df); + if (!natives.contains(nat)) { + fm.addUnencodedNativeForFlavor(df, nat); + List nativesNew = fm.getNativesForFlavor(df); + natives.add(nat); + if (!natives.equals(nativesNew)) { + System.err.println("orig=" + natives); + System.err.println("new=" + nativesNew); + throw new RuntimeException("Test failed"); + } + } + } + + /** + * Verifies that a flavor doesn't have any flavor-to-native mappings after + * a call to setNativesForFlavor() with this flavor and an empty native + * array as arguments. 
+ */ + public static void test5() { + final DataFlavor flavor = + new DataFlavor("text/plain-TEST5; charset=Unicode", null); + + fm.getNativesForFlavor(flavor); + + fm.setNativesForFlavor(flavor, new String[0]); + + List natives = fm.getNativesForFlavor(flavor); + + if (!natives.isEmpty()) { + System.err.println("natives=" + natives); + throw new RuntimeException("Test failed"); + } + } + + /** + * Verifies that a native doesn't have any native-to-flavor mappings after + * a call to setFlavorsForNative() with this native and an empty flavor + * array as arguments. + */ + public static void test6() { + final String nat = "STRING"; + fm.getFlavorsForNative(nat); + fm.setFlavorsForNative(nat, new DataFlavor[0]); + + List flavors = fm.getFlavorsForNative(nat); + + if (!flavors.isEmpty()) { + System.err.println("flavors=" + flavors); + throw new RuntimeException("Test failed"); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/event/InputEvent/EventWhenTest/EventWhenTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+import sun.awt.SunToolkit;
+
+import java.awt.*;
+import java.awt.event.AWTEventListener;
+import java.awt.event.InputEvent;
+import java.awt.event.KeyEvent;
+import java.awt.event.MouseEvent;
+
+/*
+ * @test
+ * @bug 8046495
+ * @summary Verifies that mouse/key events have always increasing 'when' timestamps
+ * @author Anton Nashatyrev
+ * @run main EventWhenTest
+ */
+public class EventWhenTest {
+
+    private static volatile int eventsCount = 0;
+    private static volatile boolean failed = false;
+
+    static {
+        Toolkit.getDefaultToolkit().addAWTEventListener(new AWTEventListener() {
+            long lastWhen = 0;
+
+            @Override
+            public void eventDispatched(AWTEvent event) {
+                long curWhen;
+                if (event instanceof KeyEvent) {
+                    curWhen = ((KeyEvent) event).getWhen();
+                } else if (event instanceof MouseEvent) {
+                    curWhen = ((MouseEvent) event).getWhen();
+                } else {
+                    return;
+                }
+
+                eventsCount++;
+
+                if (curWhen < lastWhen) {
+                    System.err.println("FAILED: " + curWhen + " < " + lastWhen +
+                            " for " + event);
+                    failed = true;
+                } else {
+                    lastWhen = curWhen;
+                }
+            }
+        }, AWTEvent.KEY_EVENT_MASK | AWTEvent.MOUSE_EVENT_MASK);
+    }
+
+    public static void main(String[] args) throws Exception {
+
+        SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit();
+        Frame frame = new Frame();
+
+        try {
+            Button b = new Button("Button");
+            frame.setBounds(300, 300, 300, 300);
+            frame.add(b);
+            frame.setVisible(true);
+            toolkit.realSync();
+
+            Robot robot = new Robot();
+            robot.mouseMove((int)frame.getLocationOnScreen().getX() + 150,
+                    (int)frame.getLocationOnScreen().getY() + 150);
+
+            eventsCount = 0;
+            System.out.println("Clicking mouse...");
+            for (int i = 0; i < 300 && !failed; i++) {
+                robot.mousePress(InputEvent.BUTTON1_MASK);
+                robot.mouseRelease(InputEvent.BUTTON1_MASK);
+                Thread.sleep(10);
+                b.setLabel("Click: " + i);
+            }
+
+            if (eventsCount == 0) {
+                throw new RuntimeException("No events were received");
+            }
+
+            if (failed) {
+                throw new RuntimeException("Test failed.");
+            }
+            System.out.println("Clicking mouse done: " + eventsCount + " events.");
+
+            b.requestFocusInWindow();
+            toolkit.realSync();
+
+            eventsCount = 0;
+            System.out.println("Typing a key...");
+            for (int i = 0; i < 300 && !failed; i++) {
+                robot.keyPress(KeyEvent.VK_A);
+                robot.keyRelease(KeyEvent.VK_A);
+                Thread.sleep(10);
+                b.setLabel("Type: " + i);
+            }
+            System.out.println("Key typing done: " + eventsCount + " events.");
+
+            if (eventsCount == 0) {
+                throw new RuntimeException("No events were received");
+            }
+
+            if (failed) {
+                throw new RuntimeException("Test failed.");
+            }
+
+            System.out.println("Success!");
+        } finally {
+            frame.dispose();
+        }
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/awt/image/DrawImage/DrawImageCoordsTest.java Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028539 + * @summary Test that drawing a scaled image terminates. + * @run main/othervm/timeout=60 DrawImageCoordsTest +*/ + +import java.awt.Color; +import java.awt.Graphics; +import java.awt.Graphics2D; +import java.awt.geom.AffineTransform; +import java.awt.image.BufferedImage; + +public class DrawImageCoordsTest { + + public static void main(String[] args) { + + /* Create an image to draw, filled in solid red. */ + BufferedImage srcImg = + new BufferedImage(200, 200, BufferedImage.TYPE_INT_RGB); + Graphics srcG = srcImg.createGraphics(); + srcG.setColor(Color.red); + int w = srcImg.getWidth(null); + int h = srcImg.getHeight(null); + srcG.fillRect(0, 0, w, h); + + /* Create a destination image */ + BufferedImage dstImage = + new BufferedImage(200, 200, BufferedImage.TYPE_INT_RGB); + Graphics2D dstG = dstImage.createGraphics(); + /* draw image under a scaling transform that overflows int */ + AffineTransform tx = new AffineTransform(0.5, 0, 0, 0.5, + 0, 5.8658460197478485E9); + dstG.setTransform(tx); + dstG.drawImage(srcImg, 0, 0, null ); + /* draw image under the same overflowing transform, cancelling + * out the 0.5 scale on the graphics + */ + dstG.drawImage(srcImg, 0, 0, 2*w, 2*h, null); + if (Color.red.getRGB() == dstImage.getRGB(w/2, h/2)) { + throw new RuntimeException("Unexpected color: clipping failed."); + } + System.out.println("Test Thread Completed"); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/image/DrawImage/IncorrectAlphaConversionBicubic.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+import java.awt.AlphaComposite;
+import java.awt.Color;
+import java.awt.Graphics2D;
+import java.awt.GraphicsConfiguration;
+import java.awt.GraphicsDevice;
+import java.awt.GraphicsEnvironment;
+import java.awt.RenderingHints;
+import java.awt.image.BufferedImage;
+import java.awt.image.DataBuffer;
+import java.awt.image.DataBufferByte;
+import java.awt.image.DataBufferInt;
+import java.awt.image.DataBufferShort;
+import java.awt.image.VolatileImage;
+
+import static java.awt.Transparency.TRANSLUCENT;
+
+/**
+ * @test
+ * @bug 8062164
+ * @summary We should get correct alpha when we draw to/from a VolatileImage
+ *          and bicubic interpolation is enabled
+ * @author Sergey Bylokhov
+ */
+public final class IncorrectAlphaConversionBicubic {
+
+    private static final Color RGB = new Color(200, 255, 7, 123);
+    private static final int SIZE = 100;
+
+    public static void main(final String[] args) {
+        final GraphicsEnvironment ge =
+                GraphicsEnvironment.getLocalGraphicsEnvironment();
+        final GraphicsDevice gd = ge.getDefaultScreenDevice();
+        final GraphicsConfiguration gc = gd.getDefaultConfiguration();
+        final VolatileImage vi =
+                gc.createCompatibleVolatileImage(SIZE, SIZE, TRANSLUCENT);
+        final BufferedImage bi = makeUnmanagedBI(gc, TRANSLUCENT);
+        final int expected = bi.getRGB(2, 2);
+
+        int attempt = 0;
+        BufferedImage snapshot;
+        while (true) {
+            if (++attempt > 10) {
+                throw new RuntimeException("Too many attempts: " + attempt);
+            }
+            vi.validate(gc);
+            final Graphics2D g2d = vi.createGraphics();
+            g2d.setComposite(AlphaComposite.Src);
+            g2d.scale(2, 2);
+            g2d.setRenderingHint(RenderingHints.KEY_INTERPOLATION,
+                    RenderingHints.VALUE_INTERPOLATION_BICUBIC);
+            g2d.drawImage(bi, 0, 0, null);
+            g2d.dispose();
+
+            snapshot = vi.getSnapshot();
+            if (vi.contentsLost()) {
+                continue;
+            }
+            break;
+        }
+        final int actual = snapshot.getRGB(2, 2);
+        if (actual != expected) {
+            System.err.println("Actual: " + Integer.toHexString(actual));
+            System.err.println("Expected: " + Integer.toHexString(expected));
+            throw new RuntimeException("Test failed");
+        }
+    }
+
+    private static BufferedImage makeUnmanagedBI(GraphicsConfiguration gc,
+                                                 int type) {
+        BufferedImage img = gc.createCompatibleImage(SIZE, SIZE, type);
+        Graphics2D g2d = img.createGraphics();
+        g2d.setColor(RGB);
+        g2d.fillRect(0, 0, SIZE, SIZE);
+        g2d.dispose();
+        // Grabbing a reference to the raster's pixel array marks the image
+        // as "untrackable", so the pipeline cannot cache it in accelerated
+        // (managed) memory.
+        final DataBuffer db = img.getRaster().getDataBuffer();
+        if (db instanceof DataBufferInt) {
+            ((DataBufferInt) db).getData();
+        } else if (db instanceof DataBufferShort) {
+            ((DataBufferShort) db).getData();
+        } else if (db instanceof DataBufferByte) {
+            ((DataBufferByte) db).getData();
+        } else {
+            // Unknown buffer type: fall back to disabling acceleration.
+            try {
+                img.setAccelerationPriority(0.0f);
+            } catch (final Throwable ignored) {
+            }
+        }
+        return img;
+    }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/awt/image/DrawImage/IncorrectClipXorModeSW2Surface.java Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Color; +import java.awt.Graphics2D; +import java.awt.GraphicsConfiguration; +import java.awt.GraphicsEnvironment; +import java.awt.Image; +import java.awt.Rectangle; +import java.awt.Shape; +import java.awt.geom.AffineTransform; +import java.awt.image.BufferedImage; +import java.awt.image.DataBuffer; +import java.awt.image.DataBufferByte; +import java.awt.image.DataBufferInt; +import java.awt.image.DataBufferShort; +import java.awt.image.VolatileImage; +import java.io.File; +import java.io.IOException; + +import javax.imageio.ImageIO; + +import static java.awt.geom.Rectangle2D.Double; + +/** + * @test + * @bug 8061456 + * @summary Tests drawing BI to volatile image using different clips + xor mode. + * Results of the blit BI to compatibleImage is used for comparison. + * @author Sergey Bylokhov + */ +public final class IncorrectClipXorModeSW2Surface { + + private static int[] SIZES = {2, 10, 100}; + private static final Shape[] SHAPES = { + new Rectangle(0, 0, 0, 0), + new Rectangle(0, 0, 1, 1), + new Rectangle(0, 1, 1, 1), + new Rectangle(1, 0, 1, 1), + new Rectangle(1, 1, 1, 1), + + new Double(0, 0, 0.5, 0.5), + new Double(0, 0.5, 0.5, 0.5), + new Double(0.5, 0, 0.5, 0.5), + new Double(0.5, 0.5, 0.5, 0.5), + new Double(0.25, 0.25, 0.5, 0.5), + new Double(0, 0.25, 1, 0.5), + new Double(0.25, 0, 0.5, 1), + + new Double(.10, .10, .20, .20), + new Double(.75, .75, .20, .20), + new Double(.75, .10, .20, .20), + new Double(.10, .75, .20, .20), + }; + + public static void main(final String[] args) throws IOException { + GraphicsEnvironment ge = GraphicsEnvironment + .getLocalGraphicsEnvironment(); + GraphicsConfiguration gc = ge.getDefaultScreenDevice() + .getDefaultConfiguration(); + AffineTransform at; + for (int size : SIZES) { + at = AffineTransform.getScaleInstance(size, size); + for (Shape clip : SHAPES) { + clip = at.createTransformedShape(clip); + for (Shape to : SHAPES) { + to = at.createTransformedShape(to); + // Prepare test images + BufferedImage snapshot; + BufferedImage bi = getBufferedImage(size); + VolatileImage vi = getVolatileImage(gc, size); + while (true) { + vi.validate(gc); + Graphics2D g2d = vi.createGraphics(); + g2d.setColor(Color.GREEN); + g2d.fillRect(0, 0, size, size); + g2d.dispose(); + if (vi.validate(gc) != VolatileImage.IMAGE_OK) { + continue; + } + draw(clip, to, bi, vi); + snapshot = vi.getSnapshot(); + if (vi.contentsLost()) { + continue; + } + break; + } + // Prepare gold images + BufferedImage goldvi = getCompatibleImage(gc, size); + BufferedImage goldbi = getBufferedImage(size); + draw(clip, to, goldbi, goldvi); + validate(snapshot, goldvi); + vi.flush(); + } + } + } + } + + private static void draw(Shape clip, Shape shape, Image from, Image to) { + Graphics2D g2d = (Graphics2D) to.getGraphics(); + g2d.setXORMode(Color.BLACK); + g2d.setClip(clip); + Rectangle toBounds = shape.getBounds(); + g2d.drawImage(from, toBounds.x, toBounds.y, toBounds.width, + toBounds.height, null); + g2d.dispose(); + } + + private 
static BufferedImage getBufferedImage(int sw) { + final BufferedImage bi = new BufferedImage(sw, sw, BufferedImage.TYPE_INT_ARGB); + Graphics2D g2d = bi.createGraphics(); + g2d.setColor(Color.RED); + g2d.fillRect(0, 0, sw, sw); + g2d.dispose(); + + final DataBuffer db = bi.getRaster().getDataBuffer(); + if (db instanceof DataBufferInt) { + ((DataBufferInt) db).getData(); + } else if (db instanceof DataBufferShort) { + ((DataBufferShort) db).getData(); + } else if (db instanceof DataBufferByte) { + ((DataBufferByte) db).getData(); + } else { + try { + bi.setAccelerationPriority(0.0f); + } catch (final Throwable ignored) { + } + } + return bi; + } + + private static VolatileImage getVolatileImage(GraphicsConfiguration gc, + int size) { + return gc.createCompatibleVolatileImage(size, size); + } + + private static BufferedImage getCompatibleImage(GraphicsConfiguration gc, + int size) { + BufferedImage image = gc.createCompatibleImage(size, size); + Graphics2D g2d = image.createGraphics(); + g2d.setColor(Color.GREEN); + g2d.fillRect(0, 0, size, size); + g2d.dispose(); + return image; + } + + private static void validate(BufferedImage bi, BufferedImage goldbi) + throws IOException { + for (int x = 0; x < bi.getWidth(); ++x) { + for (int y = 0; y < bi.getHeight(); ++y) { + if (goldbi.getRGB(x, y) != bi.getRGB(x, y)) { + ImageIO.write(bi, "png", new File("actual.png")); + ImageIO.write(goldbi, "png", new File("expected.png")); + throw new RuntimeException("Test failed."); + } + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/image/DrawImage/IncorrectUnmanagedImageRotatedClip.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.AlphaComposite; +import java.awt.Color; +import java.awt.Graphics2D; +import java.awt.GraphicsConfiguration; +import java.awt.GraphicsEnvironment; +import java.awt.Image; +import java.awt.Rectangle; +import java.awt.image.BufferedImage; +import java.awt.image.DataBuffer; +import java.awt.image.DataBufferByte; +import java.awt.image.DataBufferInt; +import java.awt.image.DataBufferShort; +import java.awt.image.VolatileImage; +import java.io.File; +import java.io.IOException; + +import javax.imageio.ImageIO; + +import static java.awt.Transparency.TRANSLUCENT; +import static java.awt.image.BufferedImage.TYPE_INT_ARGB; + +/** + * @test + * @bug 8059942 + * @summary Tests rotated clip when unmanaged image is drawn to VI. 
+ * Results of the blit to compatibleImage are used for comparison. + * @author Sergey Bylokhov + */ +public final class IncorrectUnmanagedImageRotatedClip { + + public static void main(final String[] args) throws IOException { + BufferedImage bi = makeUnmanagedBI(); + fill(bi); + test(bi); + } + + private static void test(final BufferedImage bi) throws IOException { + GraphicsEnvironment ge = GraphicsEnvironment + .getLocalGraphicsEnvironment(); + GraphicsConfiguration gc = ge.getDefaultScreenDevice() + .getDefaultConfiguration(); + VolatileImage vi = gc.createCompatibleVolatileImage(500, 200, + TRANSLUCENT); + BufferedImage gold = gc.createCompatibleImage(500, 200, TRANSLUCENT); + // draw to compatible Image + draw(bi, gold); + // draw to volatile image + int attempt = 0; + BufferedImage snapshot; + while (true) { + if (++attempt > 10) { + throw new RuntimeException("Too many attempts: " + attempt); + } + vi.validate(gc); + if (vi.validate(gc) != VolatileImage.IMAGE_OK) { + continue; + } + draw(bi, vi); + snapshot = vi.getSnapshot(); + if (vi.contentsLost()) { + continue; + } + break; + } + // validate images + for (int x = 0; x < gold.getWidth(); ++x) { + for (int y = 0; y < gold.getHeight(); ++y) { + if (gold.getRGB(x, y) != snapshot.getRGB(x, y)) { + ImageIO.write(gold, "png", new File("gold.png")); + ImageIO.write(snapshot, "png", new File("bi.png")); + throw new RuntimeException("Test failed."); + } + } + } + } + + private static void draw(final BufferedImage from,final Image to) { + final Graphics2D g2d = (Graphics2D) to.getGraphics(); + g2d.setComposite(AlphaComposite.Src); + g2d.setColor(Color.ORANGE); + g2d.fillRect(0, 0, to.getWidth(null), to.getHeight(null)); + g2d.rotate(Math.toRadians(45)); + g2d.clip(new Rectangle(41, 42, 43, 44)); + g2d.drawImage(from, 50, 50, Color.blue, null); + g2d.dispose(); + } + + private static BufferedImage makeUnmanagedBI() { + final BufferedImage bi = new BufferedImage(500, 200, TYPE_INT_ARGB); + final DataBuffer db = bi.getRaster().getDataBuffer(); + if (db instanceof DataBufferInt) { + ((DataBufferInt) db).getData(); + } else if (db instanceof DataBufferShort) { + ((DataBufferShort) db).getData(); + } else if (db instanceof DataBufferByte) { + ((DataBufferByte) db).getData(); + } else { + try { + bi.setAccelerationPriority(0.0f); + } catch (final Throwable ignored) { + } + } + return bi; + } + + private static void fill(final Image image) { + final Graphics2D graphics = (Graphics2D) image.getGraphics(); + graphics.setComposite(AlphaComposite.Src); + for (int i = 0; i < image.getHeight(null); ++i) { + graphics.setColor(new Color(i, 0, 0)); + graphics.fillRect(0, i, image.getWidth(null), 1); + } + graphics.dispose(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/image/DrawImage/IncorrectUnmanagedImageSourceOffset.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.AlphaComposite; +import java.awt.Color; +import java.awt.Graphics2D; +import java.awt.GraphicsConfiguration; +import java.awt.GraphicsEnvironment; +import java.awt.Image; +import java.awt.image.BufferedImage; +import java.awt.image.DataBuffer; +import java.awt.image.DataBufferByte; +import java.awt.image.DataBufferInt; +import java.awt.image.DataBufferShort; +import java.awt.image.VolatileImage; +import java.io.File; +import java.io.IOException; + +import javax.imageio.ImageIO; + +import static java.awt.Transparency.*; +import static java.awt.image.BufferedImage.*; + +/** + * @test + * @bug 8029253 + * @summary Tests asymmetric source offsets when unmanaged image is drawn to VI. + * Results of the blit to compatibleImage are used for comparison. + * @author Sergey Bylokhov + */ +public final class IncorrectUnmanagedImageSourceOffset { + + private static final int[] TYPES = {TYPE_INT_RGB, TYPE_INT_ARGB, + TYPE_INT_ARGB_PRE, TYPE_INT_BGR, + TYPE_3BYTE_BGR, TYPE_4BYTE_ABGR, + TYPE_4BYTE_ABGR_PRE, + /*TYPE_USHORT_565_RGB, + TYPE_USHORT_555_RGB, TYPE_BYTE_GRAY, + TYPE_USHORT_GRAY,*/ TYPE_BYTE_BINARY, + TYPE_BYTE_INDEXED}; + private static final int[] TRANSPARENCIES = {OPAQUE, BITMASK, TRANSLUCENT}; + + public static void main(final String[] args) throws IOException { + for (final int viType : TRANSPARENCIES) { + for (final int biType : TYPES) { + BufferedImage bi = makeUnmanagedBI(biType); + fill(bi); + test(bi, viType); + } + } + } + + private static void test(BufferedImage bi, int type) + throws IOException { + GraphicsEnvironment ge = GraphicsEnvironment + .getLocalGraphicsEnvironment(); + GraphicsConfiguration gc = ge.getDefaultScreenDevice() + .getDefaultConfiguration(); + VolatileImage vi = gc.createCompatibleVolatileImage(511, 255, type); + BufferedImage gold = gc.createCompatibleImage(511, 255, type); + // draw to compatible Image + Graphics2D big = gold.createGraphics(); + // force scaled blit + big.drawImage(bi, 7, 11, 127, 111, 7, 11, 127 * 2, 111, null); + big.dispose(); + // draw to volatile image + BufferedImage snapshot; + while (true) { + vi.validate(gc); + if (vi.validate(gc) != VolatileImage.IMAGE_OK) { + try { + Thread.sleep(100); + } catch (final InterruptedException ignored) { + } + continue; + } + Graphics2D vig = vi.createGraphics(); + // force scaled blit + vig.drawImage(bi, 7, 11, 127, 111, 7, 11, 127 * 2, 111, null); + vig.dispose(); + snapshot = vi.getSnapshot(); + if (vi.contentsLost()) { + try { + Thread.sleep(100); + } catch (final InterruptedException ignored) { + } + continue; + } + break; + } + // validate images + for (int x = 7; x < 127; ++x) { + for (int y = 11; y < 111; ++y) { + if (gold.getRGB(x, y) != snapshot.getRGB(x, y)) { + ImageIO.write(gold, "png", new File("gold.png")); + ImageIO.write(snapshot, "png", new File("bi.png")); + throw new RuntimeException("Test failed."); + } + } + } + } + + private static BufferedImage makeUnmanagedBI(final int type) { + final BufferedImage bi = new BufferedImage(511, 255, 
type); + final DataBuffer db = bi.getRaster().getDataBuffer(); + if (db instanceof DataBufferInt) { + ((DataBufferInt) db).getData(); + } else if (db instanceof DataBufferShort) { + ((DataBufferShort) db).getData(); + } else if (db instanceof DataBufferByte) { + ((DataBufferByte) db).getData(); + } else { + try { + bi.setAccelerationPriority(0.0f); + } catch (final Throwable ignored) { + } + } + return bi; + } + + private static void fill(final Image image) { + final Graphics2D graphics = (Graphics2D) image.getGraphics(); + graphics.setComposite(AlphaComposite.Src); + for (int i = 0; i < image.getHeight(null); ++i) { + graphics.setColor(new Color(i, 0, 0)); + graphics.fillRect(0, i, image.getWidth(null), 1); + } + graphics.dispose(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/image/DrawImage/UnmanagedDrawImagePerformance.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.AlphaComposite; +import java.awt.Graphics2D; +import java.awt.GraphicsConfiguration; +import java.awt.GraphicsDevice; +import java.awt.GraphicsEnvironment; +import java.awt.Image; +import java.awt.Polygon; +import java.awt.geom.AffineTransform; +import java.awt.image.BufferedImage; +import java.awt.image.DataBuffer; +import java.awt.image.DataBufferByte; +import java.awt.image.DataBufferInt; +import java.awt.image.DataBufferShort; +import java.awt.image.VolatileImage; + +import static java.awt.Transparency.*; +import static java.awt.image.BufferedImage.*; + +/* + * @test + * @bug 8029253 8059941 + * @summary Unmanaged images should be drawn fast. 
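+ *          ("unmanaged" here means the test grabs the image's pixel array
+ *          via the DataBuffer getData() calls in makeUnmanagedBI below,
+ *          which opts the image out of accelerated caching)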
+ * @author Sergey Bylokhov + */ +public final class UnmanagedDrawImagePerformance { + + private static final int[] TYPES = {TYPE_INT_RGB, TYPE_INT_ARGB, + TYPE_INT_ARGB_PRE, TYPE_INT_BGR, + TYPE_3BYTE_BGR, TYPE_4BYTE_ABGR, + TYPE_4BYTE_ABGR_PRE, + TYPE_USHORT_565_RGB, + TYPE_USHORT_555_RGB, TYPE_BYTE_GRAY, + TYPE_USHORT_GRAY, TYPE_BYTE_BINARY, + TYPE_BYTE_INDEXED}; + private static final int[] TRANSPARENCIES = {OPAQUE, BITMASK, TRANSLUCENT}; + private static final int SIZE = 1000; + private static final AffineTransform[] TRANSFORMS = { + AffineTransform.getScaleInstance(.5, .5), + AffineTransform.getScaleInstance(1, 1), + AffineTransform.getScaleInstance(2, 2), + AffineTransform.getShearInstance(7, 11)}; + + public static void main(final String[] args) { + for (final AffineTransform atfm : TRANSFORMS) { + for (final int viType : TRANSPARENCIES) { + for (final int biType : TYPES) { + final BufferedImage bi = makeUnmanagedBI(biType); + final VolatileImage vi = makeVI(viType); + final long time = test(bi, vi, atfm) / 1000000000; + if (time > 1) { + throw new RuntimeException(String.format( + "drawImage is slow: %d seconds", time)); + } + } + } + } + } + + private static long test(Image bi, Image vi, AffineTransform atfm) { + final Polygon p = new Polygon(); + p.addPoint(0, 0); + p.addPoint(SIZE, 0); + p.addPoint(0, SIZE); + p.addPoint(SIZE, SIZE); + p.addPoint(0, 0); + Graphics2D g2d = (Graphics2D) vi.getGraphics(); + g2d.clip(p); + g2d.transform(atfm); + g2d.setComposite(AlphaComposite.SrcOver); + final long start = System.nanoTime(); + g2d.drawImage(bi, 0, 0, null); + final long time = System.nanoTime() - start; + g2d.dispose(); + return time; + } + + private static VolatileImage makeVI(final int type) { + final GraphicsEnvironment ge = GraphicsEnvironment + .getLocalGraphicsEnvironment(); + final GraphicsDevice gd = ge.getDefaultScreenDevice(); + final GraphicsConfiguration gc = gd.getDefaultConfiguration(); + return gc.createCompatibleVolatileImage(SIZE, SIZE, type); + } + + private static BufferedImage makeUnmanagedBI(final int type) { + final BufferedImage img = new BufferedImage(SIZE, SIZE, type); + final DataBuffer db = img.getRaster().getDataBuffer(); + if (db instanceof DataBufferInt) { + ((DataBufferInt) db).getData(); + } else if (db instanceof DataBufferShort) { + ((DataBufferShort) db).getData(); + } else if (db instanceof DataBufferByte) { + ((DataBufferByte) db).getData(); + } else { + try { + img.setAccelerationPriority(0.0f); + } catch (final Throwable ignored) { + } + } + return img; + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/image/MultiResolutionImage/MultiResolutionImageObserverTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +import java.awt.Color; +import java.awt.Graphics; +import java.awt.Graphics2D; +import java.awt.Image; +import java.awt.Toolkit; +import java.awt.image.BufferedImage; +import java.awt.image.ImageObserver; +import static java.awt.image.ImageObserver.*; +import java.io.File; +import javax.imageio.ImageIO; +/* + * @test + * @bug 8065627 + * @summary Animated GIFs fail to display on a HiDPI display + * @author Alexander Scherbatiy + * @run main MultiResolutionImageObserverTest + */ + +public class MultiResolutionImageObserverTest { + + private static final int TIMEOUT = 500; + + public static void main(String[] args) throws Exception { + + generateImages(); + Toolkit toolkit = Toolkit.getDefaultToolkit(); + Image image = Toolkit.getDefaultToolkit().getImage(IMAGE_NAME_1X); + + LoadImageObserver sizeObserver + = new LoadImageObserver(WIDTH | HEIGHT); + toolkit.prepareImage(image, -1, -1, sizeObserver); + waitForImageLoading(sizeObserver, "The first observer is not called"); + + LoadImageObserver bitsObserver + = new LoadImageObserver(SOMEBITS | FRAMEBITS | ALLBITS); + + BufferedImage buffImage = new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB); + Graphics2D g2d = (Graphics2D) buffImage.createGraphics(); + g2d.scale(2, 2); + g2d.drawImage(image, 0, 0, bitsObserver); + waitForImageLoading(bitsObserver, "The second observer is not called!"); + g2d.dispose(); + } + + private static void waitForImageLoading(LoadImageObserver observer, + String errorMessage) throws Exception { + + long endTime = System.currentTimeMillis() + TIMEOUT; + + while (!observer.loaded && System.currentTimeMillis() < endTime) { + Thread.sleep(TIMEOUT / 10); + } + + if (!observer.loaded) { + throw new RuntimeException(errorMessage); + } + } + + private static final String IMAGE_NAME_1X = "image.png"; + private static final String IMAGE_NAME_2X = "image@2x.png"; + + private static void generateImages() throws Exception { + generateImage(1); + generateImage(2); + } + + private static void generateImage(int scale) throws Exception { + BufferedImage image = new BufferedImage( + scale * 200, scale * 300, + BufferedImage.TYPE_INT_RGB); + Graphics g = image.createGraphics(); + g.setColor(scale == 1 ? Color.GREEN : Color.BLUE); + g.fillRect(0, 0, scale * 200, scale * 300); + File file = new File(scale == 1 ? IMAGE_NAME_1X : IMAGE_NAME_2X); + ImageIO.write(image, "png", file); + g.dispose(); + } + + private static class LoadImageObserver implements ImageObserver { + + private final int infoflags; + private boolean loaded; + + public LoadImageObserver(int flags) { + this.infoflags = flags; + } + + @Override + public boolean imageUpdate(Image img, int flags, int x, int y, int width, int height) { + + if ((flags & infoflags) != 0) { + loaded = true; + } + + return !loaded; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/awt/print/PrinterJob/ImagePrinting/NullClipARGB.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8061392 + * @summary Test no NPE when printing transparency with null clip. + */ + +import java.awt.*; +import java.awt.image.*; +import java.awt.print.*; + +public class NullClipARGB implements Printable { + + public static void main( String[] args ) { + + try { + PrinterJob pj = PrinterJob.getPrinterJob(); + pj.setPrintable(new NullClipARGB()); + pj.print(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + + public int print(Graphics g, PageFormat pf, int pageIndex) + throws PrinterException{ + + if (pageIndex != 0) { + return NO_SUCH_PAGE; + } + Graphics2D g2 = (Graphics2D)g; + System.out.println("original clip="+g2.getClip()); + g2.translate(pf.getImageableX(), pf.getImageableY()); + g2.rotate(0.2); + g2.setClip(null); + g2.setColor( Color.BLACK ); + g2.drawString("This text should be visible through the image", 0, 20); + BufferedImage bi = new BufferedImage(100, 100, + BufferedImage.TYPE_INT_ARGB ); + Graphics ig = bi.createGraphics(); + ig.setColor( new Color( 192, 192, 192, 80 ) ); + ig.fillRect( 0, 0, 100, 100 ); + ig.setColor( Color.BLACK ); + ig.drawRect( 0, 0, 99, 99 ); + ig.dispose(); + g2.drawImage(bi, 10, 0, 90, 90, null ); + g2.translate(100, 100); + g2.drawString("This text should also be visible through the image", 0, 20); + g2.drawImage(bi, 10, 0, 90, 90, null ); + return PAGE_EXISTS; + } +} --- ./jdk/test/java/beans/Introspector/Test4168833.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/beans/Introspector/Test4168833.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,10 +23,11 @@ /* * @test - * @bug 4168833 + * @bug 4168833 8034085 * @summary Tests that Introspector does not create IndexedPropertyDescriptor * from non-indexed PropertyDescriptor * @author Mark Davidson + * @author Sergey Malenkov */ import java.awt.Color; @@ -42,10 +43,6 @@ */ public class Test4168833 { public static void main(String[] args) throws Exception { - IndexedPropertyDescriptor ipd = BeanUtils.getIndexedPropertyDescriptor(Base.class, "prop"); - if (!ipd.getIndexedPropertyType().equals(Dimension.class)) { - error(ipd, "Base.prop property should a Dimension"); - } // When the Sub class is introspected, // the property type should be color. 
// The complete "classic" set of properties --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/beans/Introspector/Test8034085.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Window; +import java.beans.IndexedPropertyDescriptor; +import java.beans.PropertyDescriptor; + +/* + * @test + * @bug 8034085 + * @summary Tests that Introspector ignores indexed getter and setter for incorrect types + * @author Sergey Malenkov + */ + +public class Test8034085 { + public static final StringBuilder ERROR = new StringBuilder(); + + public static void main(String[] args) { + test(Window.class, false, true, false, false); + + test(Bean0000.class, false, false, false, false); + test(Bean0001.class, false, false, false, true); + test(Bean0010.class, false, false, true, false); + test(Bean0011.class, false, false, true, true); + test(Bean0100.class, false, true, false, false); + test(Bean0101.class, false, true, false, false); + test(Bean0110.class, false, true, false, false); + test(Bean0111.class, false, true, false, false); + test(Bean1000.class, true, false, false, false); + test(Bean1001.class, true, false, false, false); + test(Bean1010.class, true, false, false, false); + test(Bean1011.class, true, false, false, false); + test(Bean1100.class, true, true, false, false); + test(Bean1101.class, true, true, false, false); + test(Bean1110.class, true, true, false, false); + test(Bean1111.class, true, true, false, false); + + if (0 < ERROR.length()) { + throw new Error(ERROR.toString()); + } + } + + private static void test(Class type, boolean read, boolean write, boolean readIndexed, boolean writeIndexed) { + PropertyDescriptor pd = BeanUtils.findPropertyDescriptor(type, "size"); + if (pd != null) { + test(type, "read", read, null != pd.getReadMethod()); + test(type, "write", write, null != pd.getWriteMethod()); + if (pd instanceof IndexedPropertyDescriptor) { + IndexedPropertyDescriptor ipd = (IndexedPropertyDescriptor) pd; + test(type, "indexed read", readIndexed, null != ipd.getIndexedReadMethod()); + test(type, "indexed write", writeIndexed, null != ipd.getIndexedWriteMethod()); + } else if (readIndexed || writeIndexed) { + error(type, "indexed property does not exist"); + } + } else if (read || write || readIndexed || writeIndexed) { + error(type, "property does not exist"); + } + } + + private static void test(Class type, String name, boolean expected, boolean actual) { + if (expected && !actual) { + error(type, 
name + " method does not exist"); + } else if (!expected && actual) { + error(type, name + " method is not expected"); + } + } + + private static void error(Class type, String message) { + ERROR.append("\n\t\t").append(type.getSimpleName()).append(".size: ").append(message); + } + + public static class Bean0000 { + } + + public static class Bean0001 { + public void setSize(int index, int value) { + } + } + + public static class Bean0010 { + public int getSize(int index) { + return 0; + } + } + + public static class Bean0011 { + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean0100 { + public void setSize(int value) { + } + } + + public static class Bean0101 { + public void setSize(int value) { + } + + public void setSize(int index, int value) { + } + } + + public static class Bean0110 { + public void setSize(int value) { + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean0111 { + public void setSize(int value) { + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1000 { + public int getSize() { + return 0; + } + } + + public static class Bean1001 { + public int getSize() { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1010 { + public int getSize() { + return 0; + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean1011 { + public int getSize() { + return 0; + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1100 { + public int getSize() { + return 0; + } + + public void setSize(int value) { + } + } + + public static class Bean1101 { + public int getSize() { + return 0; + } + + public void setSize(int value) { + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1110 { + public int getSize() { + return 0; + } + + public void setSize(int value) { + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean1111 { + public int getSize() { + return 0; + } + + public void setSize(int value) { + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/beans/Introspector/Test8034164.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.beans.IndexedPropertyDescriptor; +import java.beans.PropertyDescriptor; + +/* + * @test + * @bug 8034164 + * @summary Tests that Introspector does not ignore indexed getter and setter for correct types + * @author Sergey Malenkov + */ + +public class Test8034164 { + public static final StringBuilder ERROR = new StringBuilder(); + + public static void main(String[] args) { + test(Bean0000.class, false, false, false, false); + test(Bean0001.class, false, false, false, true); + test(Bean0010.class, false, false, true, false); + test(Bean0011.class, false, false, true, true); + test(Bean0100.class, false, true, false, false); + test(Bean0101.class, false, true, false, true); + test(Bean0110.class, false, true, true, false); + test(Bean0111.class, false, true, true, true); + test(Bean1000.class, true, false, false, false); + test(Bean1001.class, true, false, false, true); + test(Bean1010.class, true, false, true, false); + test(Bean1011.class, true, false, true, true); + test(Bean1100.class, true, true, false, false); + test(Bean1101.class, true, true, false, true); + test(Bean1110.class, true, true, true, false); + test(Bean1111.class, true, true, true, true); + + if (0 < ERROR.length()) { + throw new Error(ERROR.toString()); + } + } + + private static void test(Class type, boolean read, boolean write, boolean readIndexed, boolean writeIndexed) { + PropertyDescriptor pd = BeanUtils.findPropertyDescriptor(type, "size"); + if (pd != null) { + test(type, "read", read, null != pd.getReadMethod()); + test(type, "write", write, null != pd.getWriteMethod()); + if (pd instanceof IndexedPropertyDescriptor) { + IndexedPropertyDescriptor ipd = (IndexedPropertyDescriptor) pd; + test(type, "indexed read", readIndexed, null != ipd.getIndexedReadMethod()); + test(type, "indexed write", writeIndexed, null != ipd.getIndexedWriteMethod()); + } else if (readIndexed || writeIndexed) { + error(type, "indexed property does not exist"); + } + } else if (read || write || readIndexed || writeIndexed) { + error(type, "property does not exist"); + } + } + + private static void test(Class type, String name, boolean expected, boolean actual) { + if (expected && !actual) { + error(type, name + " method does not exist"); + } else if (!expected && actual) { + error(type, name + " method is not expected"); + } + } + + private static void error(Class type, String message) { + ERROR.append("\n\t\t").append(type.getSimpleName()).append(".size: ").append(message); + } + + public static class Bean0000 { + } + + public static class Bean0001 { + public void setSize(int index, int value) { + } + } + + public static class Bean0010 { + public int getSize(int index) { + return 0; + } + } + + public static class Bean0011 { + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean0100 { + public void setSize(int[] value) { + } + } + + public static class Bean0101 { + public void setSize(int[] value) { + } + + public void setSize(int index, int value) { + } + } + + public static class Bean0110 { + public void setSize(int[] value) { + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean0111 { + public void setSize(int[] value) { + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + 
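// Illustrative note (assumption, not part of this changeset): the Bean####
// names encode which "size" accessors a bean declares, in the order
// [read][write][indexed read][indexed write]; e.g. Bean0110 declares
// setSize(int[]) and getSize(int). Using only the standard java.beans API,
// a standalone probe of one bean could look like:
//
//     java.beans.BeanInfo info = java.beans.Introspector.getBeanInfo(Bean0110.class);
//     for (java.beans.PropertyDescriptor pd : info.getPropertyDescriptors()) {
//         if (pd.getName().equals("size")
//                 && pd instanceof java.beans.IndexedPropertyDescriptor) {
//             // with matching types, the indexed read method is preserved
//             assert ((java.beans.IndexedPropertyDescriptor) pd).getIndexedReadMethod() != null;
//         }
//     }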
public static class Bean1000 { + public int[] getSize() { + return null; + } + } + + public static class Bean1001 { + public int[] getSize() { + return null; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1010 { + public int[] getSize() { + return null; + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean1011 { + public int[] getSize() { + return null; + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1100 { + public int[] getSize() { + return null; + } + + public void setSize(int[] value) { + } + } + + public static class Bean1101 { + public int[] getSize() { + return null; + } + + public void setSize(int[] value) { + } + + public void setSize(int index, int value) { + } + } + + public static class Bean1110 { + public int[] getSize() { + return null; + } + + public void setSize(int[] value) { + } + + public int getSize(int index) { + return 0; + } + } + + public static class Bean1111 { + public int[] getSize() { + return null; + } + + public void setSize(int[] value) { + } + + public int getSize(int index) { + return 0; + } + + public void setSize(int index, int value) { + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/io/SequenceInputStream/LotsOfStreams.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* @test + * @bug 7011804 + * @summary SequenceInputStream#read() was implemented recursively, + * which may cause stack overflow + */ + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.util.Enumeration; + +public class LotsOfStreams { + + static final int MAX_SUBSTREAMS = 32000; + + public static void main(String[] argv) throws Exception { + try (InputStream stream = + new SequenceInputStream(new LOSEnumeration())) { + stream.read(); + } + try (InputStream stream = + new SequenceInputStream(new LOSEnumeration())) { + byte[] b = new byte[1]; + stream.read(b, 0, 1); + } + } + + static class LOSEnumeration + implements Enumeration<InputStream> { + + private static InputStream inputStream = + new ByteArrayInputStream(new byte[0]); + private int left = MAX_SUBSTREAMS; + + public boolean hasMoreElements() { + return (left > 0); + } + public InputStream nextElement() { + left--; + return inputStream; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/Class/getDeclaredField/ClassDeclaredFieldsTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Field; +import java.lang.reflect.ReflectPermission; +import java.security.CodeSource; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.Arrays; +import java.util.Enumeration; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * @test + * @bug 8065552 + * @summary test that all fields returned by getDeclaredFields() can be + * set accessible if the right permission is granted; this test + * also verifies that Class.classLoader final private field is + * hidden from reflection access.
+ * @run main/othervm ClassDeclaredFieldsTest UNSECURE + * @run main/othervm ClassDeclaredFieldsTest SECURE + * + * @author danielfuchs + */ +public class ClassDeclaredFieldsTest { + + // Test with or without a security manager + public static enum TestCase { + UNSECURE, SECURE; + public void run() throws Exception { + System.out.println("Running test case: " + name()); + Configure.setUp(this); + test(this); + } + } + /** + * @param args the command line arguments + */ + public static void main(String[] args) throws Exception { + System.out.println(System.getProperty("java.version")); + if (args == null || args.length == 0) { + args = new String[] { "SECURE" }; + } else if (args.length != 1) { + throw new IllegalArgumentException("Only one arg expected: " + + Arrays.asList(args)); + } + TestCase.valueOf(args[0]).run(); + } + + static void test(TestCase test) { + for (Field f : Class.class.getDeclaredFields()) { + f.setAccessible(true); + System.out.println("Field "+f.getName()+" is now accessible."); + if (f.getName().equals("classLoader")) { + throw new RuntimeException("Found "+f.getName()+" field!"); + } + } + try { + Class.class.getDeclaredField("classLoader"); + throw new RuntimeException("Expected NoSuchFieldException for" + + " 'classLoader' field not raised"); + } catch(NoSuchFieldException x) { + System.out.println("Got expected exception: " + x); + } + System.out.println("Passed "+test); + } + + // A helper class to configure the security manager for the test, + // and bypass it when needed. + static class Configure { + static Policy policy = null; + static final ThreadLocal<AtomicBoolean> allowAll = new ThreadLocal<AtomicBoolean>() { + @Override + protected AtomicBoolean initialValue() { + return new AtomicBoolean(false); + } + }; + static void setUp(TestCase test) { + switch (test) { + case SECURE: + if (policy == null && System.getSecurityManager() != null) { + throw new IllegalStateException("SecurityManager already set"); + } else if (policy == null) { + policy = new SimplePolicy(TestCase.SECURE, allowAll); + Policy.setPolicy(policy); + System.setSecurityManager(new SecurityManager()); + } + if (System.getSecurityManager() == null) { + throw new IllegalStateException("No SecurityManager."); + } + if (policy == null) { + throw new IllegalStateException("policy not configured"); + } + break; + case UNSECURE: + if (System.getSecurityManager() != null) { + throw new IllegalStateException("SecurityManager already set"); + } + break; + default: + throw new InternalError("No such testcase: " + test); + } + } + static void doPrivileged(Runnable run) { + allowAll.get().set(true); + try { + run.run(); + } finally { + allowAll.get().set(false); + } + } + } + + // A Helper class to build a set of permissions. + final static class PermissionsBuilder { + final Permissions perms; + public PermissionsBuilder() { + this(new Permissions()); + } + public PermissionsBuilder(Permissions perms) { + this.perms = perms; + } + public PermissionsBuilder add(Permission p) { + perms.add(p); + return this; + } + public PermissionsBuilder addAll(PermissionCollection col) { + if (col != null) { + for (Enumeration<Permission> e = col.elements(); e.hasMoreElements(); ) { + perms.add(e.nextElement()); + } + } + return this; + } + public Permissions toPermissions() { + final PermissionsBuilder builder = new PermissionsBuilder(); + builder.addAll(perms); + return builder.perms; + } + } + + // Policy for the test...
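// Illustrative sketch (assumption, not part of this changeset): test setup
// that needs privileges beyond the restrictive set runs through
// Configure.doPrivileged(...), which sets the thread-local flag so that
// SimplePolicy below temporarily grants AllPermission, e.g.:
//
//     Configure.doPrivileged(() -> System.getProperty("user.home"));
//
// When the Runnable returns, the finally block clears the flag and the
// restrictive permission set applies again.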
+ public static class SimplePolicy extends Policy { + + final Permissions permissions; + final Permissions allPermissions; + final ThreadLocal<AtomicBoolean> allowAll; // per-thread flag used to bypass the policy during test setup + public SimplePolicy(TestCase test, ThreadLocal<AtomicBoolean> allowAll) { + this.allowAll = allowAll; + // the test code proper only needs to reflect over Class's declared + // fields and make them accessible, hence these two permissions... + permissions = new Permissions(); + permissions.add(new RuntimePermission("accessDeclaredMembers")); + permissions.add(new ReflectPermission("suppressAccessChecks")); + + // these are used for configuring the test itself... + allPermissions = new Permissions(); + allPermissions.add(new java.security.AllPermission()); + + } + + @Override + public boolean implies(ProtectionDomain domain, Permission permission) { + if (allowAll.get().get()) return allPermissions.implies(permission); + return permissions.implies(permission); + } + + @Override + public PermissionCollection getPermissions(CodeSource codesource) { + return new PermissionsBuilder().addAll(allowAll.get().get() + ? allPermissions : permissions).toPermissions(); + } + + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) { + return new PermissionsBuilder().addAll(allowAll.get().get() + ? allPermissions : permissions).toPermissions(); + } + } + +} --- ./jdk/test/java/lang/ProcessBuilder/Basic.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/ProcessBuilder/Basic.java Wed Feb 04 12:14:43 2015 -0800 @@ -2246,9 +2246,10 @@ fail("Test failed: Process exited prematurely"); } long end = System.nanoTime(); - // give waitFor(timeout) a wide berth (100ms) - // Old AIX machines my need a little longer. - if ((end - start) > 100000000L * (AIX.is() ? 4 : 1)) + // give waitFor(timeout) a wide berth (2s) + System.out.printf(" waitFor process: delta: %d%n", (end - start)); + + if ((end - start) > TimeUnit.SECONDS.toNanos(2)) fail("Test failed: waitFor took too long (" + (end - start) + "ns)"); p.destroy(); @@ -2272,19 +2273,23 @@ final Process p = new ProcessBuilder(childArgs).start(); long start = System.nanoTime(); - p.waitFor(1000, TimeUnit.MILLISECONDS); + p.waitFor(10, TimeUnit.MILLISECONDS); long end = System.nanoTime(); - if ((end - start) < 500000000) + if ((end - start) < TimeUnit.MILLISECONDS.toNanos(10)) fail("Test failed: waitFor didn't take long enough (" + (end - start) + "ns)"); p.destroy(); start = System.nanoTime(); - p.waitFor(1000, TimeUnit.MILLISECONDS); + p.waitFor(8, TimeUnit.SECONDS); end = System.nanoTime(); - if ((end - start) > 900000000) - fail("Test failed: waitFor took too long on a dead process. (" + (end - start) + "ns)"); + + int exitValue = p.exitValue(); + + if ((end - start) > TimeUnit.SECONDS.toNanos(7)) + fail("Test failed: waitFor took too long on a dead process. (" + (end - start) + "ns)" + + ", exitValue: " + exitValue); } catch (Throwable t) { unexpected(t); } //---------------------------------------------------------------- --- ./jdk/test/java/lang/ProcessBuilder/InheritIO/InheritIO.sh Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/ProcessBuilder/InheritIO/InheritIO.sh Wed Feb 04 12:14:43 2015 -0800 @@ -36,9 +36,12 @@ exit 1 fi +if [ "x${COMPILEJAVA}" = "x" ]; then + COMPILEJAVA="${TESTJAVA}" +fi JAVA="${TESTJAVA}/bin/java" -JAVAC="${TESTJAVA}/bin/javac" +JAVAC="${COMPILEJAVA}/bin/javac" cp -f ${TESTSRC}/InheritIO.java .
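The Basic.java hunks above relax the timing assertions around Process.waitFor(timeout, unit). A minimal standalone sketch of the contract the updated test relies on (the "sleep" command and the exact bounds are illustrative assumptions, not taken from the changeset):

    import java.util.concurrent.TimeUnit;

    public class WaitForTimeoutSketch {
        public static void main(String[] args) throws Exception {
            Process p = new ProcessBuilder("sleep", "60").start();
            long start = System.nanoTime();
            // returns false if the timeout elapses before the process exits
            boolean exited = p.waitFor(10, TimeUnit.MILLISECONDS);
            long elapsed = System.nanoTime() - start;
            // on a live process, waitFor(t) must block for roughly t...
            if (!exited && elapsed < TimeUnit.MILLISECONDS.toNanos(10))
                throw new AssertionError("waitFor returned too early: " + elapsed + "ns");
            p.destroy();
            // ...but on a destroyed process it should return quickly, which is
            // why the test only asserts a generous upper bound after destroy().
            p.waitFor(8, TimeUnit.SECONDS);
            System.out.println("exit value: " + p.exitValue());
        }
    }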
--- ./jdk/test/java/lang/ProcessBuilder/SecurityManagerClinit.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/ProcessBuilder/SecurityManagerClinit.java Wed Feb 04 12:14:43 2015 -0800 @@ -54,9 +54,6 @@ System.getProperty("java.home") + File.separator + "bin" + File.separator + "java"; - // A funky contrived security setup, just for bug repro purposes. - java.security.Security.setProperty("package.access", "java.util"); - final Policy policy = new Policy (new FilePermission("<>", "execute"), --- ./jdk/test/java/lang/instrument/DaemonThread/TestDaemonThread.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/instrument/DaemonThread/TestDaemonThread.java Wed Feb 04 12:14:43 2015 -0800 @@ -28,7 +28,7 @@ * * @build jdk.testlibrary.* DummyAgent DummyClass TestDaemonThreadLauncher TestDaemonThread * @run shell ../MakeJAR3.sh DummyAgent - * @run main TestDaemonThreadLauncher /timeout=240 + * @run main/timeout=240 TestDaemonThreadLauncher * */ import java.io.File; --- ./jdk/test/java/lang/instrument/NMTHelper.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/instrument/NMTHelper.java Wed Feb 04 12:14:43 2015 -0800 @@ -21,8 +21,12 @@ * questions. */ +import java.io.File; +import java.io.FileWriter; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.Arrays; +import java.util.stream.Collectors; import sun.management.ManagementFactoryHelper; import com.sun.management.DiagnosticCommandMBean; @@ -32,8 +36,8 @@ executeDcmd("vmNativeMemory", "baseline"); } - // Total: reserved=3484685KB +293KB, committed=266629KB +293KB - private static Pattern totalLine = Pattern.compile("^Total: reserved=\\d+KB .*KB, committed=\\d+KB (.*)KB$"); + // Total: reserved=3484685KB +293KB, committed=266629KB +293KB + private static Pattern totalLine = Pattern.compile("^Total: reserved=\\d+KB .*KB, committed=\\d+KB (.*)KB$"); public static long committedDiff() throws Exception { String res = (String) executeDcmd("vmNativeMemory", "detail.diff"); @@ -53,14 +57,14 @@ Object[] dcmdArgs = {args}; String[] signature = {String[].class.getName()}; - try { - System.out.print("> " + cmd + " "); - for (String s : args) { - System.out.print(s + " "); - } - System.out.println(":"); + String cmdString = cmd + " " + + Arrays.stream(args).collect(Collectors.joining(" ")); + File f = new File("dcmdoutput-" + cmd + "-" + System.currentTimeMillis() + ".txt"); + System.out.println("Output from Dcmd '" + cmdString + "' is being written to file " + f); + try (FileWriter fw = new FileWriter(f)) { + fw.write("> " + cmdString + ":"); String result = (String) dcmd.invoke(cmd, dcmdArgs, signature); - System.out.println(result); + fw.write(result); return result; } catch(Exception ex) { ex.printStackTrace(); --- ./jdk/test/java/lang/instrument/RedefineBigClass.sh Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/instrument/RedefineBigClass.sh Wed Feb 04 12:14:43 2015 -0800 @@ -27,7 +27,7 @@ # @author Daniel D. Daugherty # # @run shell MakeJAR3.sh RedefineBigClassAgent 'Can-Redefine-Classes: true' -# @run build BigClass RedefineBigClassApp +# @run build BigClass RedefineBigClassApp NMTHelper # @run shell/timeout=600 RedefineBigClass.sh # --- ./jdk/test/java/lang/instrument/RetransformBigClass.sh Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/instrument/RetransformBigClass.sh Wed Feb 04 12:14:43 2015 -0800 @@ -27,7 +27,7 @@ # @author Daniel D. 
Daugherty # # @run shell MakeJAR4.sh RetransformBigClassAgent SimpleIdentityTransformer 'Can-Retransform-Classes: true' -# @run build BigClass RetransformBigClassApp +# @run build BigClass RetransformBigClassApp NMTHelper # @run shell/timeout=600 RetransformBigClass.sh # --- ./jdk/test/java/lang/instrument/VerifyLocalVariableTableOnRetransformTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/instrument/VerifyLocalVariableTableOnRetransformTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -100,7 +100,7 @@ // The HotSpot VM hands us class file bytes at initial class // load time that match the .class file contents. However, // according to the following spec that is not required: - // http://docs.oracle.com/javase/7/docs/api/java/lang/instrument/Instrumentation.html#retransformClasses(java.lang.Class...) + // https://docs.oracle.com/javase/7/docs/api/java/lang/instrument/Instrumentation.html#retransformClasses(java.lang.Class...) // This test exists to catch any unintentional change in // behavior by the HotSpot VM. If this behavior is intentionally // changed in the future, then this test will need to be --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/ExplicitCastArgumentsTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang.invoke; + +import sun.invoke.util.Wrapper; + +/* @test + * @summary unit tests for MethodHandles.explicitCastArguments() + * + * @run main/bootclasspath java.lang.invoke.ExplicitCastArgumentsTest + */ +public class ExplicitCastArgumentsTest { + private static final boolean VERBOSE = Boolean.getBoolean("verbose"); + private static final Class THIS_CLASS = ExplicitCastArgumentsTest.class; + + public static void main(String[] args) throws Throwable { + testVarargsCollector(); + testRef2Prim(); + System.out.println("TEST PASSED"); + } + + public static String[] f(String... 
args) { return args; } + + public static void testVarargsCollector() throws Throwable { + MethodType mt = MethodType.methodType(String[].class, String[].class); + MethodHandle mh = MethodHandles.publicLookup().findStatic(THIS_CLASS, "f", mt); + mh = MethodHandles.explicitCastArguments(mh, MethodType.methodType(Object.class, Object.class)); + mh.invokeWithArguments((Object)(new String[] {"str1", "str2"})); + } + + public static void testRef2Prim() throws Throwable { + for (Wrapper from : Wrapper.values()) { + for (Wrapper to : Wrapper.values()) { + if (from == Wrapper.VOID || to == Wrapper.VOID) continue; + testRef2Prim(from, to); + } + } + } + + public static void testRef2Prim(Wrapper from, Wrapper to) throws Throwable { + // MHs.eCA javadoc: + // If T0 is a reference and T1 a primitive, and if the reference is null at runtime, a zero value is introduced. + test(from.wrapperType(), to.primitiveType(), null, false); + } + + public static void test(Class from, Class to, Object param, boolean failureExpected) throws Throwable { + if (VERBOSE) System.out.printf("%-10s => %-10s: %5s: ", from.getSimpleName(), to.getSimpleName(), param); + + MethodHandle original = MethodHandles.identity(from); + MethodType newType = original.type().changeReturnType(to); + + try { + MethodHandle target = MethodHandles.explicitCastArguments(original, newType); + Object result = target.invokeWithArguments(param); + + if (VERBOSE) { + String resultStr; + if (result != null) { + resultStr = String.format("%10s (%10s)", "'"+result+"'", result.getClass().getSimpleName()); + } else { + resultStr = String.format("%10s", result); + } + System.out.println(resultStr); + } + + if (failureExpected) { + String msg = String.format("No exception thrown: %s => %s; parameter: %s", from, to, param); + throw new AssertionError(msg); + } + } catch (AssertionError e) { + throw e; // report test failure + } catch (Throwable e) { + if (VERBOSE) System.out.printf("%s: %s\n", e.getClass(), e.getMessage()); + if (!failureExpected) { + String msg = String.format("Unexpected exception was thrown: %s => %s; parameter: %s", from, to, param); + throw new AssertionError(msg, e); + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/LFCachingTestCase.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.lang.invoke.MethodHandle; +import java.lang.reflect.InvocationTargetException; + +/** + * Abstract class for lambda forms caching testing. + * + * @author kshefov + */ +public abstract class LFCachingTestCase extends LambdaFormTestCase { + + /** + * Constructor for lambda forms caching test case. + * + * @param testMethod A method from {@code j.l.i.MethodHandles} class that + * returns a {@code j.l.i.MethodHandle} instance. + */ + protected LFCachingTestCase(TestMethods testMethod) { + super(testMethod); + } + + /** + * Checks that the lambda forms of the two adapter method handles adapter1 + * and adapter2 are the same. + * + * @param adapter1 First method handle. + * @param adapter2 Second method handle. + */ + public void checkLFCaching(MethodHandle adapter1, MethodHandle adapter2) { + try { + + if (!adapter1.type().equals(adapter2.type())) { + throw new Error("TESTBUG: Types of the two method handles are not the same"); + } + + Object lambdaForm0 = LambdaFormTestCase.INTERNAL_FORM.invoke(adapter1); + Object lambdaForm1 = LambdaFormTestCase.INTERNAL_FORM.invoke(adapter2); + + if (lambdaForm0 == null || lambdaForm1 == null) { + throw new Error("Unexpected error: One or both lambda forms of the method handles are null"); + } + + if (lambdaForm0 != lambdaForm1) { + // Since LambdaForm caches are based on SoftReferences, GC can cause element eviction. + if (noGCHappened()) { + System.err.println("Lambda form 0 toString is:"); + System.err.println(lambdaForm0); + System.err.println("Lambda form 1 toString is:"); + System.err.println(lambdaForm1); + throw new AssertionError("Error: Lambda forms of the two method handles" + + " are not the same. LF caching does not work"); + } else { + System.err.println("LambdaForms differ, but there was a GC in between. Ignore the failure."); + } + } + } catch (IllegalAccessException | IllegalArgumentException | + SecurityException | InvocationTargetException ex) { + throw new Error("Unexpected exception: ", ex); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/LFGarbageCollectedTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/* + * @test LFGarbageCollectedTest + * @bug 8046703 + * @summary Test verifies that lambda forms are garbage collected + * @author kshefov + * @library /lib/testlibrary/jsr292 /lib/testlibrary + * @ignore 8057020 + * @build TestMethods + * @build LambdaFormTestCase + * @build LFGarbageCollectedTest + * @run main/othervm LFGarbageCollectedTest + */ + +import java.lang.invoke.MethodHandle; +import java.lang.ref.PhantomReference; +import java.lang.ref.ReferenceQueue; +import java.lang.reflect.InvocationTargetException; +import java.util.EnumSet; +import java.util.Map; + +/** + * Lambda forms garbage collection test class. + */ +public final class LFGarbageCollectedTest extends LambdaFormTestCase { + + /** + * Constructor for a lambda forms garbage collection test case. + * + * @param testMethod A method from {@code j.l.i.MethodHandles} class that + * returns a {@code j.l.i.MethodHandle} instance. + */ + public LFGarbageCollectedTest(TestMethods testMethod) { + super(testMethod); + } + + @Override + public void doTest() { + try { + Map data = getTestMethod().getTestCaseData(); + MethodHandle adapter; + try { + adapter = getTestMethod().getTestCaseMH(data, TestMethods.Kind.ONE); + } catch (NoSuchMethodException ex) { + throw new Error("Unexpected exception: ", ex); + } + Object lambdaForm = LambdaFormTestCase.INTERNAL_FORM.invoke(adapter); + if (lambdaForm == null) { + throw new Error("Unexpected error: Lambda form of the method handle is null"); + } + ReferenceQueue rq = new ReferenceQueue(); + PhantomReference ph = new PhantomReference(lambdaForm, rq); + lambdaForm = null; + data = null; + adapter = null; + for (int i = 0; i < 1000 && !ph.isEnqueued(); i++) { + System.gc(); + } + if (!ph.isEnqueued()) { + throw new AssertionError("Error: Lambda form is not garbage collected"); + } + } catch (IllegalAccessException | IllegalArgumentException | + InvocationTargetException ex) { + throw new Error("Unexpected exception: ", ex); + } + } + + /** + * Main routine for lambda forms garbage collection test. + * + * @param args Accepts no arguments. + */ + public static void main(String[] args) { + // The "identity", "constant", "arrayElementGetter" and "arrayElementSetter" + // methods should be removed from this test, + // because their lambda forms are stored in a static field and are not GC'ed. + // There can be only a finite number of such LFs for each method, + // so no memory leak happens. + EnumSet testMethods = EnumSet.complementOf(EnumSet.of( + TestMethods.IDENTITY, + TestMethods.CONSTANT, + TestMethods.ARRAY_ELEMENT_GETTER, + TestMethods.ARRAY_ELEMENT_SETTER)); + LambdaFormTestCase.runTests(LFGarbageCollectedTest::new, testMethods); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/LFMultiThreadCachingTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test LFMultiThreadCachingTest + * @bug 8046703 + * @summary Test verifies that lambda forms are cached when run with multiple threads + * @author kshefov + * @library /lib/testlibrary/jsr292 /lib/testlibrary + * @build TestMethods + * @build LambdaFormTestCase + * @build LFCachingTestCase + * @build LFMultiThreadCachingTest + * @run main/othervm LFMultiThreadCachingTest + */ + +import java.lang.invoke.MethodHandle; +import java.util.EnumSet; +import java.util.Map; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; + +/** + * Multiple threaded lambda forms caching test class. + */ +public final class LFMultiThreadCachingTest extends LFCachingTestCase { + private static final TestMethods.Kind[] KINDS; + static { + EnumSet<TestMethods.Kind> set = EnumSet.complementOf(EnumSet.of(TestMethods.Kind.EXCEPT)); + KINDS = set.toArray(new TestMethods.Kind[set.size()]); + if (KINDS.length < 2) { + throw new Error("TESTBUG: KINDS.length[" + KINDS.length + "] should be at least 2"); + } + } + private static final int CORES = Math.max(KINDS.length, Runtime.getRuntime().availableProcessors()); + + /** + * Constructor for a multiple threaded lambda forms caching test case. + * + * @param testMethod A method from {@code j.l.i.MethodHandles} class that + * returns a {@code j.l.i.MethodHandle} instance. + */ + public LFMultiThreadCachingTest(TestMethods testMethod) { + super(testMethod); + } + + @Override + public void doTest() { + Map data = getTestMethod().getTestCaseData(); + ConcurrentLinkedQueue<MethodHandle> adapters = new ConcurrentLinkedQueue<>(); + CyclicBarrier begin = new CyclicBarrier(CORES); + CountDownLatch end = new CountDownLatch(CORES); + for (int i = 0; i < CORES; ++i) { + TestMethods.Kind kind = KINDS[i % KINDS.length]; + new Thread(() -> { + try { + begin.await(); + adapters.add(getTestMethod().getTestCaseMH(data, kind)); + } catch (InterruptedException | BrokenBarrierException | IllegalAccessException | NoSuchMethodException ex) { + throw new Error("Unexpected exception: ", ex); + } finally { + end.countDown(); + } + }).start(); + } + try { + end.await(); + } catch (InterruptedException ex) { + throw new Error("Unexpected exception: ", ex); + } + if (adapters.size() < CORES) { + throw new Error("adapters size[" + adapters.size() + "] is less than " + CORES); + } + MethodHandle prev = adapters.poll(); + for (MethodHandle current : adapters) { + checkLFCaching(prev, current); + prev = current; + } + } + + /** + * Main routine for multiple threaded lambda forms caching test. + * + * @param args Accepts no arguments. + */ + public static void main(String[] args) { + LambdaFormTestCase.runTests(LFMultiThreadCachingTest::new, EnumSet.allOf(TestMethods.class)); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/LFSingleThreadCachingTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test LFSingleThreadCachingTest + * @bug 8046703 + * @summary Test verifies that lambda forms are cached when run with single thread + * @author kshefov + * @library /lib/testlibrary/jsr292 /lib/testlibrary + * @build TestMethods + * @build LambdaFormTestCase + * @build LFCachingTestCase + * @build LFSingleThreadCachingTest + * @run main/othervm LFSingleThreadCachingTest + */ + +import java.lang.invoke.MethodHandle; +import java.util.EnumSet; +import java.util.Map; + +/** + * Single threaded lambda forms caching test class. + */ +public final class LFSingleThreadCachingTest extends LFCachingTestCase { + + /** + * Constructor for a single threaded lambda forms caching test case. + * + * @param testMethod A method from {@code j.l.i.MethodHandles} class that + * returns a {@code j.l.i.MethodHandle} instance. + */ + public LFSingleThreadCachingTest(TestMethods testMethod) { + super(testMethod); + } + + @Override + public void doTest() { + MethodHandle adapter1; + MethodHandle adapter2; + Map data = getTestMethod().getTestCaseData(); + try { + adapter1 = getTestMethod().getTestCaseMH(data, TestMethods.Kind.ONE); + adapter2 = getTestMethod().getTestCaseMH(data, TestMethods.Kind.TWO); + } catch (NoSuchMethodException | IllegalAccessException ex) { + throw new Error("Unexpected exception: ", ex); + } + checkLFCaching(adapter1, adapter2); + } + + /** + * Main routine for single threaded lambda forms caching test. + * + * @param args Accepts no arguments. + */ + public static void main(String[] args) { + LambdaFormTestCase.runTests(LFSingleThreadCachingTest::new, EnumSet.allOf(TestMethods.class)); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/LambdaFormTestCase.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import com.oracle.testlibrary.jsr292.Helper; +import com.sun.management.HotSpotDiagnosticMXBean; + +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Method; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import jdk.testlibrary.Utils; +import jdk.testlibrary.TimeLimitedRunner; + +/** + * Lambda forms caching test case class. Contains all necessary test routines to + * test lambda forms caching in method handles returned by methods of + * MethodHandles class. + * + * @author kshefov + */ +public abstract class LambdaFormTestCase { + + private final static String METHOD_HANDLE_CLASS_NAME = "java.lang.invoke.MethodHandle"; + private final static String INTERNAL_FORM_METHOD_NAME = "internalForm"; + private static final double ITERATIONS_TO_CODE_CACHE_SIZE_RATIO + = 45 / (128.0 * 1024 * 1024); + private static final long TIMEOUT = Helper.IS_THOROUGH ? 0L : (long) (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) * 0.9); + + /** + * Reflection link to {@code j.l.i.MethodHandle.internalForm} method. It is + * used to get a lambda form from a method handle. + */ + protected final static Method INTERNAL_FORM; + private static final List<GarbageCollectorMXBean> gcInfo; + + private static long gcCount() { + return gcInfo.stream().mapToLong(GarbageCollectorMXBean::getCollectionCount).sum(); + } + + static { + try { + Class<?> mhClass = Class.forName(METHOD_HANDLE_CLASS_NAME); + INTERNAL_FORM = mhClass.getDeclaredMethod(INTERNAL_FORM_METHOD_NAME); + INTERNAL_FORM.setAccessible(true); + } catch (Exception ex) { + throw new Error("Unexpected exception: ", ex); + } + + gcInfo = ManagementFactory.getGarbageCollectorMXBeans(); + if (gcInfo.size() == 0) { + throw new Error("No GarbageCollectorMXBeans found."); + } + } + + private final TestMethods testMethod; + private static long totalIterations = 0L; + private static long doneIterations = 0L; + private static boolean passed = true; + private static int testCounter = 0; + private static int failCounter = 0; + private long gcCountAtStart; + + /** + * Test case constructor. Generates test cases with random method types for + * given methods from the {@code j.l.i.MethodHandles} class. + * + * @param testMethod A method from {@code j.l.i.MethodHandles} class which + * returns a {@code j.l.i.MethodHandle}. + */ + protected LambdaFormTestCase(TestMethods testMethod) { + this.testMethod = testMethod; + this.gcCountAtStart = gcCount(); + } + + public TestMethods getTestMethod() { + return testMethod; + } + + protected boolean noGCHappened() { + return gcCount() == gcCountAtStart; + } + + /** + * Routine that executes a test case. + */ + public abstract void doTest(); + + /** + * Runs a number of test cases defined by the size of testCases list. + * + * @param ctor constructor of a LambdaFormTestCase subclass + * object.
+ * @param testMethods list of test methods + */ + public static void runTests(Function<TestMethods, LambdaFormTestCase> ctor, Collection<TestMethods> testMethods) { + long testCaseNum = testMethods.size(); + totalIterations = Math.max(1, Helper.TEST_LIMIT / testCaseNum); + System.out.printf("Number of iterations according to -DtestLimit is %d (%d cases)%n", + totalIterations, totalIterations * testCaseNum); + HotSpotDiagnosticMXBean hsDiagBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class); + long codeCacheSize = Long.parseLong( + hsDiagBean.getVMOption("ReservedCodeCacheSize").getValue()); + System.out.printf("Code Cache Size is %d bytes%n", codeCacheSize); + long iterationsByCodeCacheSize = (long) (codeCacheSize + * ITERATIONS_TO_CODE_CACHE_SIZE_RATIO); + System.out.printf("Number of iterations limited by code cache size is %d (%d cases)%n", + iterationsByCodeCacheSize, iterationsByCodeCacheSize * testCaseNum); + if (totalIterations > iterationsByCodeCacheSize) { + totalIterations = iterationsByCodeCacheSize; + } + System.out.printf("Number of iterations is set to %d (%d cases)%n", + totalIterations, totalIterations * testCaseNum); + System.out.flush(); + TimeLimitedRunner runner = new TimeLimitedRunner(TIMEOUT, 4.0d, + () -> { + if (doneIterations >= totalIterations) { + return false; + } + System.err.println(String.format("Iteration %d:", doneIterations)); + for (TestMethods testMethod : testMethods) { + LambdaFormTestCase testCase = ctor.apply(testMethod); + try { + System.err.printf("Tested LF caching feature with MethodHandles.%s method.%n", + testCase.getTestMethod().name); + testCase.doTest(); + System.err.println("PASSED"); + } catch (Throwable t) { + t.printStackTrace(); + System.err.println("FAILED"); + passed = false; + failCounter++; + } + testCounter++; + } + doneIterations++; + return true; + }); + try { + runner.call(); + } catch (Throwable t) { + t.printStackTrace(); + System.err.println("FAILED"); + throw new Error("Unexpected error!"); + } + if (!passed) { + throw new Error(String.format("%d of %d test cases FAILED! %n" + + "Rerun the test with the same \"-Dseed=\" option as in the log file!", + failCounter, testCounter)); + } else { + System.err.println(String.format("All %d test cases PASSED!", testCounter)); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/LFCaching/TestMethods.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +import com.oracle.testlibrary.jsr292.Helper; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Array; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Enumeration containing information about methods from + * {@code j.l.i.MethodHandles} class that are used for testing lambda forms + * caching. + * + * @author kshefov + */ +public enum TestMethods { + + FOLD_ARGUMENTS("foldArguments") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int modifierMHArgNum = Helper.RNG.nextInt(realArity + 1); + data.put("modifierMHArgNum", modifierMHArgNum); + Class combinerReturnType; + if (realArity == 0) { + combinerReturnType = void.class; + } else { + combinerReturnType = Helper.RNG.nextBoolean() ? void.class : mtTarget.parameterType(0); + } + data.put("combinerReturnType", combinerReturnType); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class combinerReturnType = (Class) data.get("combinerReturnType"); + int modifierMHArgNum = (int) data.get("modifierMHArgNum"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + Class rType = mtTarget.returnType(); + int combListStart = (combinerReturnType == void.class) ? 0 : 1; + if (modifierMHArgNum < combListStart) { + modifierMHArgNum = combListStart; + } + MethodHandle combiner = TestMethods.methodHandleGenerator(combinerReturnType, + mtTarget.parameterList().subList(combListStart, + modifierMHArgNum), kind); + return MethodHandles.foldArguments(target, combiner); + } + }, + DROP_ARGUMENTS("dropArguments") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. 
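            // Illustrative note (inferred from the comment above, not part of
            // this changeset): the generator works in argument slots, and
            // long/double occupy two slots each, so a requested arity of 5 may
            // yield e.g. (int, long, double)void, whose parameterCount() read
            // below is only 3.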
+ int realArity = mtTarget.parameterCount(); + int dropArgsPos = Helper.RNG.nextInt(realArity + 1); + data.put("dropArgsPos", dropArgsPos); + MethodType mtDropArgs = TestMethods.randomMethodTypeGenerator( + Helper.RNG.nextInt(super.maxArity - realArity)); + data.put("mtDropArgs", mtDropArgs); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtDropArgs = (MethodType) data.get("mtDropArgs"); + int dropArgsPos = (int) data.get("dropArgsPos"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + int mtTgtSlotsCount = TestMethods.argSlotsCount(mtTarget); + int mtDASlotsCount = TestMethods.argSlotsCount(mtDropArgs); + List<Class<?>> fakeParList; + if (mtTgtSlotsCount + mtDASlotsCount > super.maxArity - 1) { + fakeParList = TestMethods.reduceArgListToSlotsCount(mtDropArgs.parameterList(), + super.maxArity - mtTgtSlotsCount - 1); + } else { + fakeParList = mtDropArgs.parameterList(); + } + return MethodHandles.dropArguments(target, dropArgsPos, fakeParList); + } + }, + EXPLICIT_CAST_ARGUMENTS("explicitCastArguments", Helper.MAX_ARITY / 2) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + MethodType mtExcplCastArgs = TestMethods.randomMethodTypeGenerator(realArity); + if (mtTarget.returnType() == void.class) { + mtExcplCastArgs = MethodType.methodType(void.class, + mtExcplCastArgs.parameterArray()); + } + if (mtExcplCastArgs.returnType() == void.class) { + mtExcplCastArgs = MethodType.methodType(mtTarget.returnType(), + mtExcplCastArgs.parameterArray()); + } + data.put("mtExcplCastArgs", mtExcplCastArgs); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtExcplCastArgs = (MethodType) data.get("mtExcplCastArgs"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + return MethodHandles.explicitCastArguments(target, mtExcplCastArgs); + } + }, + FILTER_ARGUMENTS("filterArguments", Helper.MAX_ARITY / 2) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots.
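            // Illustrative note (not part of this changeset):
            // MethodHandles.filterArguments(target, pos, f...) pre-processes
            // target arguments [pos, pos + f.length) with unary filters; each
            // filter must return exactly the type of the target parameter it
            // feeds, which is how getMH() below pairs filter and target types.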
+ int realArity = mtTarget.parameterCount(); + int filterArgsPos = Helper.RNG.nextInt(realArity + 1); + data.put("filterArgsPos", filterArgsPos); + int filtersArgsArrayLength = Helper.RNG.nextInt(realArity + 1 - filterArgsPos); + data.put("filtersArgsArrayLength", filtersArgsArrayLength); + MethodType mtFilter = TestMethods.randomMethodTypeGenerator(filtersArgsArrayLength); + data.put("mtFilter", mtFilter); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtFilter = (MethodType) data.get("mtFilter"); + int filterArgsPos = (int) data.get("filterArgsPos"); + int filtersArgsArrayLength = (int) data.get("filtersArgsArrayLength"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + MethodHandle[] filters = new MethodHandle[filtersArgsArrayLength]; + for (int i = 0; i < filtersArgsArrayLength; i++) { + filters[i] = TestMethods.filterGenerator(mtFilter.parameterType(i), + mtTarget.parameterType(filterArgsPos + i), kind); + } + return MethodHandles.filterArguments(target, filterArgsPos, filters); + } + }, + FILTER_RETURN_VALUE("filterReturnValue") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int filterArgsPos = Helper.RNG.nextInt(realArity + 1); + int filtersArgsArrayLength = Helper.RNG.nextInt(realArity + 1 - filterArgsPos); + MethodType mtFilter = TestMethods.randomMethodTypeGenerator(filtersArgsArrayLength); + data.put("mtFilter", mtFilter); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtFilter = (MethodType) data.get("mtFilter"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + MethodHandle filter = TestMethods.filterGenerator(mtTarget.returnType(), + mtFilter.returnType(), kind); + return MethodHandles.filterReturnValue(target, filter); + } + }, + INSERT_ARGUMENTS("insertArguments") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. 
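            // Illustrative note (not part of this changeset):
            // MethodHandles.insertArguments(target, pos, values...) pre-binds
            // constant values starting at pos and drops those parameters from
            // the type; e.g. binding "x" at 0 in (String,int)int yields (int)int.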
+ int realArity = mtTarget.parameterCount(); + int insertArgsPos = Helper.RNG.nextInt(realArity + 1); + data.put("insertArgsPos", insertArgsPos); + int insertArgsArrayLength = Helper.RNG.nextInt(realArity + 1 - insertArgsPos); + MethodType mtInsertArgs = MethodType.methodType(void.class, mtTarget.parameterList() + .subList(insertArgsPos, insertArgsPos + insertArgsArrayLength)); + data.put("mtInsertArgs", mtInsertArgs); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtInsertArgs = (MethodType) data.get("mtInsertArgs"); + int insertArgsPos = (int) data.get("insertArgsPos"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + Object[] insertList = Helper.randomArgs(mtInsertArgs.parameterList()); + return MethodHandles.insertArguments(target, insertArgsPos, insertList); + } + }, + PERMUTE_ARGUMENTS("permuteArguments", Helper.MAX_ARITY / 2) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int[] permuteArgsReorderArray = new int[realArity]; + int mtPermuteArgsNum = Helper.RNG.nextInt(Helper.MAX_ARITY); + mtPermuteArgsNum = mtPermuteArgsNum == 0 ? 1 : mtPermuteArgsNum; + MethodType mtPermuteArgs = TestMethods.randomMethodTypeGenerator(mtPermuteArgsNum); + mtTarget = mtTarget.changeReturnType(mtPermuteArgs.returnType()); + for (int i = 0; i < realArity; i++) { + int mtPermuteArgsParNum = Helper.RNG.nextInt(mtPermuteArgs.parameterCount()); + permuteArgsReorderArray[i] = mtPermuteArgsParNum; + mtTarget = mtTarget.changeParameterType( + i, mtPermuteArgs.parameterType(mtPermuteArgsParNum)); + } + data.put("mtTarget", mtTarget); + data.put("permuteArgsReorderArray", permuteArgsReorderArray); + data.put("mtPermuteArgs", mtPermuteArgs); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + MethodType mtPermuteArgs = (MethodType) data.get("mtPermuteArgs"); + int[] permuteArgsReorderArray = (int[]) data.get("permuteArgsReorderArray"); + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), kind); + return MethodHandles.permuteArguments(target, mtPermuteArgs, permuteArgsReorderArray); + } + }, + THROW_EXCEPTION("throwException") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class rType = mtTarget.returnType(); + return MethodHandles.throwException(rType, Exception.class + ); + } + }, + GUARD_WITH_TEST("guardWithTest") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + 
data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int modifierMHArgNum = Helper.RNG.nextInt(realArity + 1); + data.put("modifierMHArgNum", modifierMHArgNum); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + int modifierMHArgNum = (int) data.get("modifierMHArgNum"); + TestMethods.Kind targetKind; + TestMethods.Kind fallbackKind; + if (kind.equals(TestMethods.Kind.ONE)) { + targetKind = TestMethods.Kind.ONE; + fallbackKind = TestMethods.Kind.TWO; + } else { + targetKind = TestMethods.Kind.TWO; + fallbackKind = TestMethods.Kind.ONE; + } + MethodHandle target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), targetKind); + MethodHandle fallback = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), fallbackKind); + MethodHandle test = TestMethods.methodHandleGenerator(boolean.class, + mtTarget.parameterList().subList(0, modifierMHArgNum), kind); + return MethodHandles.guardWithTest(test, target, fallback); + } + }, + CATCH_EXCEPTION("catchException") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int modifierMHArgNum = Helper.RNG.nextInt(realArity + 1); + data.put("modifierMHArgNum", modifierMHArgNum); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + int modifierMHArgNum = (int) data.get("modifierMHArgNum"); + MethodHandle target; + if (kind.equals(TestMethods.Kind.ONE)) { + target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), TestMethods.Kind.ONE); + } else { + target = TestMethods.methodHandleGenerator(mtTarget.returnType(), + mtTarget.parameterList(), TestMethods.Kind.EXCEPT); + } + List> handlerParamList = new ArrayList<>(mtTarget.parameterCount() + 1); + handlerParamList.add(Exception.class); + handlerParamList.addAll(mtTarget.parameterList().subList(0, modifierMHArgNum)); + MethodHandle handler = TestMethods.methodHandleGenerator( + mtTarget.returnType(), handlerParamList, TestMethods.Kind.TWO); + return MethodHandles.catchException(target, Exception.class, handler); + } + }, + INVOKER("invoker", Helper.MAX_ARITY - 1) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + return MethodHandles.invoker(mtTarget); + } + }, + EXACT_INVOKER("exactInvoker", Helper.MAX_ARITY - 1) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + 
return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + return MethodHandles.exactInvoker(mtTarget); + } + }, + SPREAD_INVOKER("spreadInvoker", Helper.MAX_ARITY - 1) { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + // Arity after reducing because of long and double take 2 slots. + int realArity = mtTarget.parameterCount(); + int modifierMHArgNum = Helper.RNG.nextInt(realArity + 1); + data.put("modifierMHArgNum", modifierMHArgNum); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + int modifierMHArgNum = (int) data.get("modifierMHArgNum"); + return MethodHandles.spreadInvoker(mtTarget, modifierMHArgNum); + } + }, + ARRAY_ELEMENT_GETTER("arrayElementGetter") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class rType = mtTarget.returnType(); + if (rType == void.class) { + rType = Object.class; + } + return MethodHandles.arrayElementGetter(Array.newInstance(rType, 2).getClass()); + } + }, + ARRAY_ELEMENT_SETTER("arrayElementSetter") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class rType = mtTarget.returnType(); + if (rType == void.class) { + rType = Object.class; + } + return MethodHandles.arrayElementSetter(Array.newInstance(rType, 2).getClass()); + } + }, + CONSTANT("constant") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class rType = mtTarget.returnType(); + if (rType == void.class) { + rType = Object.class; + } + if (rType.equals(boolean.class)) { + // There should be the same return values because for default values there are special "zero" forms + return MethodHandles.constant(rType, true); + } else { + return MethodHandles.constant(rType, kind.getValue(rType)); + } + } + }, + IDENTITY("identity") { + @Override + public Map getTestCaseData() { + Map data = new HashMap<>(); + int desiredArity = Helper.RNG.nextInt(super.maxArity); + MethodType mtTarget = TestMethods.randomMethodTypeGenerator(desiredArity); + data.put("mtTarget", mtTarget); + return data; + } + + @Override + protected MethodHandle getMH(Map data, TestMethods.Kind kind) { + MethodType mtTarget = (MethodType) data.get("mtTarget"); + Class rType = 
mtTarget.returnType();
+            if (rType == void.class) {
+                rType = Object.class;
+            }
+            return MethodHandles.identity(rType);
+        }
+    };
+
+    /**
+     * Test method's name.
+     */
+    public final String name;
+
+    private final int maxArity;
+
+    private TestMethods(String name, int maxArity) {
+        this.name = name;
+        this.maxArity = maxArity;
+    }
+
+    private TestMethods(String name) {
+        this(name, Helper.MAX_ARITY);
+    }
+
+    protected MethodHandle getMH(Map<String, Object> data, TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException {
+        throw new UnsupportedOperationException("TESTBUG: getMH method is not implemented for test method " + this);
+    }
+
+    /**
+     * Creates an adapter method handle depending on a test method from the
+     * MethodHandles class. The adapter is what the test method returns. This
+     * method can create two kinds of adapters; their types are the same, but
+     * their return values differ.
+     *
+     * @param data a Map containing data to create a method handle, can be
+     * obtained by the {@link #getTestCaseData} method
+     * @param kind defines whether adapter ONE or adapter TWO will be
+     * initialized. Should be equal to TestMethods.Kind.ONE or
+     * TestMethods.Kind.TWO
+     * @return a method handle adapter that behaves according to
+     * TestMethods.Kind.ONE or TestMethods.Kind.TWO
+     * @throws java.lang.NoSuchMethodException
+     * @throws java.lang.IllegalAccessException
+     */
+    public MethodHandle getTestCaseMH(Map<String, Object> data, TestMethods.Kind kind)
+            throws NoSuchMethodException, IllegalAccessException {
+        if (data == null) {
+            throw new Error(String.format("TESTBUG: Data for test method %s is not prepared",
+                    this.name));
+        }
+        if (!kind.equals(TestMethods.Kind.ONE) && !kind.equals(TestMethods.Kind.TWO)) {
+            throw new IllegalArgumentException("TESTBUG: Wrong \"kind\" (" + kind
+                    + ") arg to getTestCaseMH function."
+                    + " Should be Kind.ONE or Kind.TWO");
+        }
+        return getMH(data, kind);
+    }
+
+    /**
+     * Returns a data Map needed for the {@link #getTestCaseMH} method.
+     *
+     * @return data Map needed for the {@link #getTestCaseMH} method
+     */
+    public Map<String, Object> getTestCaseData() {
+        throw new UnsupportedOperationException(
+                "TESTBUG: getTestCaseData method is not implemented for test method " + this);
+    }
+
+    /**
+     * Enumeration used in methodHandleGenerator to define whether the MH
+     * returned by that method returns "2" (in different type representations),
+     * "4", or throws an Exception.
+     */
+    public static enum Kind {
+
+        ONE(2),
+        TWO(4),
+        EXCEPT(0);
+
+        private final int value;
+
+        private Object getValue(Class<?> cl) {
+            return Helper.castToWrapper(value, cl);
+        }
+
+        private MethodHandle getBasicMH(Class<?> rType) throws NoSuchMethodException, IllegalAccessException {
+            MethodHandle result = null;
+            switch (this) {
+                case ONE:
+                case TWO:
+                    if (rType.equals(void.class)) {
+                        result = MethodHandles.lookup().findVirtual(Kind.class, "returnVoid", MethodType.methodType(void.class));
+                        result = MethodHandles.insertArguments(result, 0, this);
+                    } else {
+                        result = MethodHandles.constant(rType, getValue(rType));
+                    }
+                    break;
+                case EXCEPT:
+                    result = MethodHandles.throwException(rType, Exception.class);
+                    result = MethodHandles.insertArguments(result, 0, new Exception());
+                    break;
+            }
+            return result;
+        }
+
+        private void returnVoid() {
+        }
+
+        private Kind(int value) {
+            this.value = value;
+        }
+    }
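The Kind.getBasicMH logic above reduces to two public combinators: constant handles for ONE/TWO, and a thrower with its exception pre-bound for EXCEPT. A minimal standalone sketch, illustrative only and not part of this changeset:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;

    public class KindSketch {
        public static void main(String[] args) throws Throwable {
            // Kind.ONE / Kind.TWO reduce to constant handles of the requested return type:
            MethodHandle one = MethodHandles.constant(int.class, 2);
            MethodHandle two = MethodHandles.constant(int.class, 4);
            System.out.println((int) one.invokeExact() + " " + (int) two.invokeExact());
            // Kind.EXCEPT binds a ready-made exception to a thrower with the same return type:
            MethodHandle thrower = MethodHandles.throwException(int.class, Exception.class);
            MethodHandle except = MethodHandles.insertArguments(thrower, 0, new Exception());
            try {
                int ignored = (int) except.invokeExact();
            } catch (Exception expected) {
                System.out.println("threw as intended");
            }
        }
    }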
+
+    /**
+     * Routine used to obtain a randomly generated method type.
+     *
+     * @param arity Arity of the returned method type.
+     * @return MethodType generated randomly.
+     */
+    private static MethodType randomMethodTypeGenerator(int arity) {
+        final Class<?>[] CLASSES = {
+            Object.class,
+            int.class,
+            boolean.class,
+            byte.class,
+            short.class,
+            char.class,
+            long.class,
+            float.class,
+            double.class
+        };
+        if (arity > Helper.MAX_ARITY) {
+            throw new IllegalArgumentException(
+                    String.format("Arity should not exceed %d!", Helper.MAX_ARITY));
+        }
+        List<Class<?>> list = Helper.randomClasses(CLASSES, arity);
+        list = Helper.getParams(list, false, arity);
+        int i = Helper.RNG.nextInt(CLASSES.length + 1);
+        Class<?> rtype = i == CLASSES.length ? void.class : CLASSES[i];
+        return MethodType.methodType(rtype, list);
+    }
+
+    /**
+     * Routine used to obtain a method handle of a given type and kind (return
+     * value).
+     *
+     * @param returnType Type of the MH return value.
+     * @param argTypes Types of the MH args.
+     * @param kind Defines whether the obtained MH returns "2" or "4".
+     * @return Method handle of the given type.
+     * @throws NoSuchMethodException
+     * @throws IllegalAccessException
+     */
+    private static MethodHandle methodHandleGenerator(Class<?> returnType,
+            List<Class<?>> argTypes, TestMethods.Kind kind)
+            throws NoSuchMethodException, IllegalAccessException {
+        MethodHandle result;
+        result = kind.getBasicMH(returnType);
+        return Helper.addTrailingArgs(result, argTypes.size(), argTypes);
+    }
+
+    /**
+     * Routine that generates filter method handles to test the
+     * MethodHandles.filterArguments method.
+     *
+     * @param inputType Filter's argument type.
+     * @param returnType Filter's return type.
+     * @param kind Filter's return value definer.
+     * @return A filter method handle that takes one argument.
+     * @throws NoSuchMethodException
+     * @throws IllegalAccessException
+     */
+    private static MethodHandle filterGenerator(Class<?> inputType, Class<?> returnType,
+            TestMethods.Kind kind) throws NoSuchMethodException, IllegalAccessException {
+        MethodHandle tmpMH = kind.getBasicMH(returnType);
+        if (inputType.equals(void.class)) {
+            return tmpMH;
+        }
+        ArrayList<Class<?>> inputTypeList = new ArrayList<>(1);
+        inputTypeList.add(inputType);
+        return Helper.addTrailingArgs(tmpMH, 1, inputTypeList);
+    }
+
+    private static int argSlotsCount(MethodType mt) {
+        int result = 0;
+        for (Class<?> cl : mt.parameterArray()) {
+            if (cl.equals(long.class) || cl.equals(double.class)) {
+                result += 2;
+            } else {
+                result++;
+            }
+        }
+        return result;
+    }
+
+    private static List<Class<?>> reduceArgListToSlotsCount(List<Class<?>> list,
+            int desiredSlotCount) {
+        List<Class<?>> result = new ArrayList<>(desiredSlotCount);
+        int count = 0;
+        for (Class<?> cl : list) {
+            if (count >= desiredSlotCount) {
+                break;
+            }
+            if (cl.equals(long.class) || cl.equals(double.class)) {
+                count += 2;
+            } else {
+                count++;
+            }
+            result.add(cl);
+        }
+        return result;
+    }
+}
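One combinator used earlier in this enum, permuteArguments, is easy to misread: the reorder array is indexed by target parameter position and holds indexes into the new parameter list. A minimal standalone sketch, illustrative only and not part of this changeset:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class PermuteSketch {
        static String pair(String a, String b) { return a + ":" + b; }

        public static void main(String[] args) throws Throwable {
            MethodHandle pair = MethodHandles.lookup().findStatic(PermuteSketch.class, "pair",
                    MethodType.methodType(String.class, String.class, String.class));
            // Target parameter 0 reads new argument 1, and vice versa: a swap.
            MethodHandle swapped = MethodHandles.permuteArguments(pair,
                    MethodType.methodType(String.class, String.class, String.class), 1, 0);
            System.out.println((String) swapped.invokeExact("x", "y")); // y:x
        }
    }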
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/lang/invoke/LambdaFormTest.java	Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @summary unit tests for java.lang.invoke.LambdaForm
+ * @run junit/othervm test.java.lang.invoke.LambdaFormTest
+ */
+package test.java.lang.invoke;
+
+import org.junit.Test;
+import java.lang.reflect.Method;
+import static org.junit.Assert.*;
+
+public class LambdaFormTest {
+    static final Method M_shortenSignature;
+    static {
+        try {
+            Class<?> impl = Class.forName("java.lang.invoke.LambdaForm", false, null);
+            Method m = impl.getDeclaredMethod("shortenSignature", String.class);
+            m.setAccessible(true);
+            M_shortenSignature = m;
+        } catch (Exception e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    public static String shortenSignature(String signature) throws ReflectiveOperationException {
+        return (String) M_shortenSignature.invoke(null, signature);
+    }
+
+    @Test
+    public void testShortenSignature() throws ReflectiveOperationException {
+        for (String s : new String[] {
+            // invariant strings:
+            "L", "LL", "ILL", "LIL", "LLI", "IILL", "ILIL", "ILLI",
+            // a few mappings:
+            "LLL=L3", "LLLL=L4", "LLLLLLLLLL=L10",
+            "IIIDDD=I3D3", "IDDD=ID3", "IIDDD=IID3", "IIID=I3D", "IIIDD=I3DD"
+        }) {
+            String s2 = s.substring(s.indexOf('=') + 1);
+            String s1 = s.equals(s2) ? s : s.substring(0, s.length() - s2.length() - 1);
+            // mix the above cases with before and after reps of Z*
+            for (int k = -3; k <= 3; k++) {
+                String beg = (k < 0 ? "ZZZZ".substring(-k) : "");
+                String end = (k > 0 ?
"ZZZZ".substring(+k) : ""); + String ks1 = beg+s1+end; + String ks2 = shortenSignature(beg)+s2+shortenSignature(end); + String ks3 = shortenSignature(ks1); + assertEquals(ks2, ks3); + } + } + } + + public static void main(String[] args) throws ReflectiveOperationException { + LambdaFormTest test = new LambdaFormTest(); + test.testShortenSignature(); + } +} --- ./jdk/test/java/lang/invoke/MethodHandles/CatchExceptionTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/invoke/MethodHandles/CatchExceptionTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -43,6 +43,7 @@ public class CatchExceptionTest { private static final List> ARGS_CLASSES; protected static final int MAX_ARITY = Helper.MAX_ARITY - 1; + static { Class classes[] = { Object.class, @@ -53,11 +54,8 @@ double[].class, String.class, }; - List> list = new ArrayList<>(MAX_ARITY); - for (int i = 0; i < MAX_ARITY; ++i) { - list.add(classes[Helper.RNG.nextInt(classes.length)]); - } - ARGS_CLASSES = Collections.unmodifiableList(list); + ARGS_CLASSES = Collections.unmodifiableList( + Helper.randomClasses(classes, MAX_ARITY)); } private final TestCase testCase; @@ -67,7 +65,6 @@ private int dropped; private MethodHandle thrower; - public CatchExceptionTest(TestCase testCase, final boolean isVararg, final int argsCount, final int catchDrops) { this.testCase = testCase; @@ -108,37 +105,7 @@ } private List> getThrowerParams(boolean isVararg, int argsCount) { - boolean unmodifiable = true; - List> classes; - classes = ARGS_CLASSES.subList(0, - Math.min(argsCount, (MAX_ARITY / 2) - 1)); - int extra = 0; - if (argsCount >= MAX_ARITY / 2) { - classes = new ArrayList<>(classes); - unmodifiable = false; - extra = (int) classes.stream().filter(Helper::isDoubleCost).count(); - int i = classes.size(); - while (classes.size() + extra < argsCount) { - Class aClass = ARGS_CLASSES.get(i); - if (Helper.isDoubleCost(aClass)) { - ++extra; - if (classes.size() + extra >= argsCount) { - break; - } - } - classes.add(aClass); - } - } - if (isVararg && classes.size() > 0) { - if (unmodifiable) { - classes = new ArrayList<>(classes); - } - int last = classes.size() - 1; - Class aClass = classes.get(classes.size() - 1); - aClass = Array.newInstance(aClass, 2).getClass(); - classes.set(last, aClass); - } - return classes; + return Helper.getParams(ARGS_CLASSES, isVararg, argsCount); } --- ./jdk/test/java/lang/invoke/MethodHandlesTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/lang/invoke/MethodHandlesTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -2160,15 +2160,23 @@ else type = type.changeParameterType(j, argType); if (done.add(type)) - testInvokers(type); + testInvokersWithCatch(type); MethodType vtype = type.changeReturnType(void.class); if (done.add(vtype)) - testInvokers(vtype); + testInvokersWithCatch(vtype); } } } } + public void testInvokersWithCatch(MethodType type) throws Throwable { + try { + testInvokers(type); + } catch (Throwable ex) { + System.out.println("*** testInvokers on "+type+" => "); + ex.printStackTrace(System.out); + } + } public void testInvokers(MethodType type) throws Throwable { if (verbosity >= 3) System.out.println("test invokers for "+type); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/invoke/VarargsArrayTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang.invoke; + +import sun.invoke.util.Wrapper; + +import java.util.Arrays; +import java.util.Collections; + +/* @test + * @summary unit tests for varargs array methods: MethodHandleInfo.varargsArray(int), + * MethodHandleInfo.varargsArray(Class,int) & MethodHandleInfo.varargsList(int) + * + * @run main/bootclasspath java.lang.invoke.VarargsArrayTest + * @run main/bootclasspath -DVarargsArrayTest.MAX_ARITY=255 -DVarargsArrayTest.START_ARITY=250 + * java.lang.invoke.VarargsArrayTest + */ + +/* This might take a while and burn lots of metadata: + * @run main/bootclasspath -DVarargsArrayTest.MAX_ARITY=255 -DVarargsArrayTest.EXHAUSTIVE=true java.lang.invoke.VarargsArrayTest + */ +public class VarargsArrayTest { + private static final Class CLASS = VarargsArrayTest.class; + private static final int MAX_ARITY = Integer.getInteger(CLASS.getSimpleName()+".MAX_ARITY", 40); + private static final int START_ARITY = Integer.getInteger(CLASS.getSimpleName()+".START_ARITY", 0); + private static final boolean EXHAUSTIVE = Boolean.getBoolean(CLASS.getSimpleName()+".EXHAUSTIVE"); + + public static void main(String[] args) throws Throwable { + testVarargsArray(); + testVarargsReferenceArray(); + testVarargsPrimitiveArray(); + } + + public static void testVarargsArray() throws Throwable { + final int MIN = START_ARITY; + final int MAX = MAX_ARITY-2; // 253+1 would cause parameter overflow with 'this' added + for (int nargs = MIN; nargs <= MAX; nargs = nextArgCount(nargs, 17, MAX)) { + MethodHandle target = MethodHandleImpl.varargsArray(nargs); + Object[] args = new Object[nargs]; + for (int i = 0; i < nargs; i++) + args[i] = "#"+i; + Object res = target.invokeWithArguments(args); + assertArrayEquals(args, (Object[])res); + } + } + + public static void testVarargsReferenceArray() throws Throwable { + testTypedVarargsArray(Object[].class); + testTypedVarargsArray(String[].class); + testTypedVarargsArray(Number[].class); + } + + public static void testVarargsPrimitiveArray() throws Throwable { + testTypedVarargsArray(int[].class); + testTypedVarargsArray(long[].class); + testTypedVarargsArray(byte[].class); + testTypedVarargsArray(boolean[].class); + testTypedVarargsArray(short[].class); + testTypedVarargsArray(char[].class); + testTypedVarargsArray(float[].class); + testTypedVarargsArray(double[].class); + } + + private static int nextArgCount(int nargs, int density, int MAX) { + if (EXHAUSTIVE) return nargs + 1; + if (nargs >= MAX) return Integer.MAX_VALUE; + int BOT = 20, TOP = MAX-5; + if (density < 10) { BOT = 10; MAX = TOP-2; } + if (nargs <= BOT || nargs >= TOP) { + ++nargs; + } 
else { + int bump = Math.max(1, 100 / density); + nargs += bump; + if (nargs > TOP) nargs = TOP; + } + return nargs; + } + + private static void testTypedVarargsArray(Class arrayType) throws Throwable { + Class elemType = arrayType.getComponentType(); + int MIN = START_ARITY; + int MAX = MAX_ARITY-2; // 253+1 would cause parameter overflow with 'this' added + int density = 3; + if (elemType == int.class || elemType == long.class) density = 7; + if (elemType == long.class || elemType == double.class) { MAX /= 2; MIN /= 2; } + for (int nargs = MIN; nargs <= MAX; nargs = nextArgCount(nargs, density, MAX)) { + Object[] args = makeTestArray(elemType, nargs); + MethodHandle varargsArray = MethodHandleImpl.varargsArray(arrayType, nargs); + MethodType vaType = varargsArray.type(); + assertEquals(arrayType, vaType.returnType()); + if (nargs != 0) { + assertEquals(elemType, vaType.parameterType(0)); + assertEquals(elemType, vaType.parameterType(vaType.parameterCount()-1)); + } + assertEquals(MethodType.methodType(arrayType, Collections.>nCopies(nargs, elemType)), + vaType); + Object res = varargsArray.invokeWithArguments(args); + assertEquals(res.getClass(), arrayType); + String resString = toArrayString(res); + assertEquals(Arrays.toString(args), resString); + + MethodHandle spreader = varargsArray.asSpreader(arrayType, nargs); + MethodType stype = spreader.type(); + assert(stype == MethodType.methodType(arrayType, arrayType)); + if (nargs <= 5) { + // invoke target as a spreader also: + @SuppressWarnings("cast") + Object res2 = spreader.invokeWithArguments((Object)res); + String res2String = toArrayString(res2); + assertEquals(Arrays.toString(args), res2String); + // invoke the spreader on a generic Object[] array; check for error + try { + Object res3 = spreader.invokeWithArguments((Object)args); + String res3String = toArrayString(res3); + assertTrue(arrayType.getName(), arrayType.isAssignableFrom(Object[].class)); + assertEquals(Arrays.toString(args), res3String); + } catch (ClassCastException ex) { + assertFalse(arrayType.getName(), arrayType.isAssignableFrom(Object[].class)); + } + } + if (nargs == 0) { + // invoke spreader on null arglist + Object res3 = spreader.invokeWithArguments((Object)null); + String res3String = toArrayString(res3); + assertEquals(Arrays.toString(args), res3String); + } + } + } + + private static Object[] makeTestArray(Class elemType, int len) { + Wrapper elem = null; + if (elemType.isPrimitive()) + elem = Wrapper.forPrimitiveType(elemType); + else if (Wrapper.isWrapperType(elemType)) + elem = Wrapper.forWrapperType(elemType); + Object[] args = new Object[len]; + for (int i = 0; i < len; i++) { + Object arg = i * 100; + if (elem == null) { + if (elemType == String.class) + arg = "#"+arg; + arg = elemType.cast(arg); // just to make sure + } else { + switch (elem) { + case BOOLEAN: arg = (i % 3 == 0); break; + case CHAR: arg = 'a' + i; break; + case LONG: arg = (long)i * 1000_000_000; break; + case FLOAT: arg = (float)i / 100; break; + case DOUBLE: arg = (double)i / 1000_000; break; + } + arg = elem.cast(arg, elemType); + } + args[i] = arg; + } + return args; + } + + private static String toArrayString(Object a) { + if (a == null) return "null"; + Class elemType = a.getClass().getComponentType(); + if (elemType == null) return a.toString(); + if (elemType.isPrimitive()) { + switch (Wrapper.forPrimitiveType(elemType)) { + case INT: return Arrays.toString((int[])a); + case BYTE: return Arrays.toString((byte[])a); + case BOOLEAN: return Arrays.toString((boolean[])a); + 
case SHORT: return Arrays.toString((short[])a); + case CHAR: return Arrays.toString((char[])a); + case FLOAT: return Arrays.toString((float[])a); + case LONG: return Arrays.toString((long[])a); + case DOUBLE: return Arrays.toString((double[])a); + } + } + return Arrays.toString((Object[])a); + } + + public static void assertArrayEquals(Object[] arr1, Object[] arr2) { + if (arr1 == null && arr2 == null) return; + if (arr1 != null && arr2 != null && arr1.length == arr2.length) { + for (int i = 0; i < arr1.length; i++) { + assertEquals(arr1[i], arr2[i]); + } + return; + } + throw new AssertionError(Arrays.deepToString(arr1) + " != " + Arrays.deepToString(arr2)); + } + + public static void assertEquals(Object o1, Object o2) { + if (o1 == null && o2 == null) return; + if (o1 != null && o1.equals(o2)) return; + throw new AssertionError(o1 + " != " + o2); + } + + public static void assertTrue(String msg, boolean b) { + if (!b) { + throw new AssertionError(msg); + } + } + + public static void assertFalse(String msg, boolean b) { + assertTrue(msg, !b); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/ref/EarlyTimeout.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + * @bug 6853696 + * @summary ReferenceQueue#remove(timeout) should not return null before + * timeout is elapsed + */ + +import java.lang.InterruptedException; +import java.lang.System; +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.WeakReference; +import java.util.concurrent.CountDownLatch; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * In order to demonstrate the issue we make several threads (two appears to be sufficient) + * to block in ReferenceQueue#remove(timeout) at the same time. + * Then, we force a reference to be enqueued by setting its referent to null and calling System.gc(). + * One of the threads gets the reference returned from the remove(). + * The other threads get null: + * 1) with bug: this may happen before the specified timeout is elapsed, + * 2) without bug: this can only happen after the timeout is fully elapsed. 
+ */
+
+public class EarlyTimeout extends Thread {
+
+    static final int THREADS_COUNT = 2;
+    static final int TIMEOUT = 1000;
+
+    static Object referent = new Object();
+    static final ReferenceQueue<Object> queue = new ReferenceQueue<>();
+    static final WeakReference<Object> weakReference = new WeakReference<>(referent, queue);
+    static final CountDownLatch startedSignal = new CountDownLatch(THREADS_COUNT);
+
+    long actual;
+    Reference<?> reference;
+
+    public static void main(String[] args) throws Exception {
+        EarlyTimeout[] threads = new EarlyTimeout[THREADS_COUNT];
+        for (int i = 0; i < THREADS_COUNT; ++i) {
+            threads[i] = new EarlyTimeout();
+            threads[i].start();
+        }
+        // The main thread waits until the threads have started and gives them
+        // a chance to block on the queue.remove(TIMEOUT) call
+        startedSignal.await();
+        Thread.sleep(TIMEOUT / 2);
+        referent = null;
+        System.gc();
+        for (EarlyTimeout thread : threads) {
+            thread.join();
+        }
+        if (weakReference.get() != null) {
+            throw new RuntimeException("weakReference was not cleared");
+        }
+        int nonNullRefCount = 0;
+        for (EarlyTimeout thread : threads) {
+            if (thread.reference == null && thread.actual < TIMEOUT) {
+                throw new RuntimeException("elapsed time " + thread.actual
+                        + " is less than timeout " + TIMEOUT);
+            }
+            if (thread.reference != null && thread.reference == weakReference) {
+                nonNullRefCount++;
+            }
+        }
+        if (nonNullRefCount > 1) {
+            throw new RuntimeException("more than one reference was removed from the queue");
+        }
+    }
+
+    public void run() {
+        try {
+            startedSignal.countDown();
+            long start = System.nanoTime();
+            reference = queue.remove(TIMEOUT);
+            actual = NANOSECONDS.toMillis(System.nanoTime() - start);
+        } catch (InterruptedException ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+}
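The contract under test is that ReferenceQueue.remove(timeout) either returns an enqueued reference or blocks for (at least) the full timeout before returning null. A minimal standalone sketch of that behavior, illustrative only and not part of this changeset:

    import java.lang.ref.Reference;
    import java.lang.ref.ReferenceQueue;
    import java.lang.ref.WeakReference;

    public class QueueTimeoutSketch {
        public static void main(String[] args) throws Exception {
            ReferenceQueue<Object> q = new ReferenceQueue<>();
            Object referent = new Object();
            // Registered but never enqueued, since the referent stays strongly reachable.
            WeakReference<Object> ref = new WeakReference<>(referent, q);
            long start = System.nanoTime();
            Reference<?> r = q.remove(500);   // nothing enqueued: should block ~500 ms
            long elapsedMs = (System.nanoTime() - start) / 1_000_000;
            System.out.println(r + " after " + elapsedMs + " ms"); // null after >= 500 ms
        }
    }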
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/lang/reflect/Generics/ThreadSafety.java	Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2014 Google Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8062771 8016236
+ * @summary Test publication of Class objects via a data race
+ * @run testng ThreadSafety
+ */
+
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.Collections;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.testng.Assert.*;
+import org.testng.annotations.Test;
+
+/**
+ * A test resulting from an attempt to repro this failure (in guice):
+ *
+ * java.lang.NullPointerException
+ *   at sun.reflect.generics.visitor.Reifier.visitClassTypeSignature(Reifier.java:125)
+ *   at sun.reflect.generics.tree.ClassTypeSignature.accept(ClassTypeSignature.java:49)
+ *   at sun.reflect.generics.repository.ClassRepository.getSuperclass(ClassRepository.java:84)
+ *   at java.lang.Class.getGenericSuperclass(Class.java:692)
+ *   at com.google.inject.TypeLiteral.getSuperclassTypeParameter(TypeLiteral.java:99)
+ *   at com.google.inject.TypeLiteral.<init>(TypeLiteral.java:79)
+ *
+ * However, as one would expect with thread safety problems in reflection, these
+ * are very hard to reproduce. This very test has never been observed to fail,
+ * but a similar test has been observed to fail about once in 2000 executions
+ * (about once every 6 CPU-hours), in jdk7 only. It appears to be fixed in jdk8+ by:
+ *
+ * 8016236: Class.getGenericInterfaces performance improvement.
+ * (by making Class.genericInfo volatile)
+ */
+public class ThreadSafety {
+    public static class EmptyClass {
+        public static class EmptyGenericSuperclass<T> {}
+        public static class EmptyGenericSubclass<T> extends EmptyGenericSuperclass<T> {}
+    }
+
+    /** published via data race */
+    private Class<?> racyClass = Object.class;
+
+    private URL[] urls = ((URLClassLoader) ThreadSafety.class.getClassLoader()).getURLs();
+
+    private Class<?> createNewEmptyGenericSubclassClass() throws Exception {
+        URLClassLoader ucl = new URLClassLoader(urls, null);
+        return Class.forName("ThreadSafety$EmptyClass$EmptyGenericSubclass", true, ucl);
+    }
+
+    @Test
+    public void testRacy_getGenericSuperclass() throws Exception {
+        final int nThreads = 10;
+        final int iterations = 30;
+        final int timeout = 10;
+        final CyclicBarrier newCycle = new CyclicBarrier(nThreads);
+        final Callable<Void> task = new Callable<Void>() {
+            public Void call() throws Exception {
+                for (int i = 0; i < iterations; i++) {
+                    final int threadId;
+                    try {
+                        threadId = newCycle.await(timeout, SECONDS);
+                    } catch (BrokenBarrierException e) {
+                        return null;
+                    }
+                    for (int j = 0; j < iterations; j++) {
+                        // one thread publishes the class object via a data
+                        // race, for the other threads to consume.
+ if (threadId == 0) { + racyClass = createNewEmptyGenericSubclassClass(); + } else { + racyClass.getGenericSuperclass(); + } + } + } + return null; + }}; + + final ExecutorService pool = Executors.newFixedThreadPool(nThreads); + try { + for (Future future : + pool.invokeAll(Collections.nCopies(nThreads, task))) { + try { + future.get(iterations * timeout, SECONDS); + } catch (ExecutionException e) { + // ignore "collateral damage" + if (!(e.getCause() instanceof BrokenBarrierException) + && + !(e.getCause() instanceof TimeoutException)) { + throw e; + } + } + } + } finally { + pool.shutdownNow(); + assertTrue(pool.awaitTermination(2 * timeout, SECONDS)); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/lang/reflect/annotationSharing/AnnotationSharing.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8054987 + * @summary Test sharing of annotations between Executable/Field instances. + * Sharing should not be noticeable when performing mutating + * operations. + * @run testng AnnotationSharing + */ + +import java.lang.annotation.*; +import java.lang.reflect.*; + +import org.testng.annotations.Test; + +public class AnnotationSharing { + public static void main(String ... 
args) throws Exception { + } + + @Test + public void testMethodSharing() throws Exception { + Method[] m1 = AnnotationSharing.class.getMethods(); + Method[] m2 = AnnotationSharing.class.getMethods(); + validateSharingSafelyObservable(m1, m2); + } + + @Test + public void testDeclaredMethodSharing() throws Exception { + Method[] m3 = AnnotationSharing.class.getDeclaredMethods(); + Method[] m4 = AnnotationSharing.class.getDeclaredMethods(); + validateSharingSafelyObservable(m3, m4); + } + + @Test + public void testFieldSharing() throws Exception { + Field[] f1 = AnnotationSharing.class.getFields(); + Field[] f2 = AnnotationSharing.class.getFields(); + validateSharingSafelyObservable(f1, f2); + } + + @Test + public void testDeclaredFieldsSharing() throws Exception { + Field[] f3 = AnnotationSharing.class.getDeclaredFields(); + Field[] f4 = AnnotationSharing.class.getDeclaredFields(); + validateSharingSafelyObservable(f3, f4); + } + + @Test + public void testMethodSharingOccurs() throws Exception { + Method mm1 = AnnotationSharing.class.getDeclaredMethod("m", (Class[])null); + Method mm2 = AnnotationSharing.class.getDeclaredMethod("m", (Class[])null); + validateAnnotationSharing(mm1, mm2); + } + + @Test + public void testMethodSharingIsSafe() throws Exception { + Method mm1 = AnnotationSharing.class.getDeclaredMethod("m", (Class[])null); + Method mm2 = AnnotationSharing.class.getDeclaredMethod("m", (Class[])null); + validateAnnotationSharingIsSafe(mm1, mm2); + validateArrayValues(mm1.getAnnotation(Baz.class), mm2.getAnnotation(Baz.class)); + } + + @Test + public void testFieldSharingOccurs() throws Exception { + Field ff1 = AnnotationSharing.class.getDeclaredField("f"); + Field ff2 = AnnotationSharing.class.getDeclaredField("f"); + validateAnnotationSharing(ff1, ff2); + } + + @Test + public void testFieldSharingIsSafe() throws Exception { + Field ff1 = AnnotationSharing.class.getDeclaredField("f"); + Field ff2 = AnnotationSharing.class.getDeclaredField("f"); + validateAnnotationSharingIsSafe(ff1, ff2); + validateArrayValues(ff1.getAnnotation(Baz.class), ff2.getAnnotation(Baz.class)); + } + + // Validate that AccessibleObject instances are not shared + private static void validateSharingSafelyObservable(AccessibleObject[] m1, AccessibleObject[] m2) + throws Exception { + + // Validate that setAccessible works + for (AccessibleObject m : m1) + m.setAccessible(false); + + for (AccessibleObject m : m2) + m.setAccessible(true); + + for (AccessibleObject m : m1) + if (m.isAccessible()) + throw new RuntimeException(m + " should not be accessible"); + + for (AccessibleObject m : m2) + if (!m.isAccessible()) + throw new RuntimeException(m + " should be accessible"); + + // Validate that methods are still equal() + for (int i = 0; i < m1.length; i++) + if (!m1[i].equals(m2[i])) + throw new RuntimeException(m1[i] + " and " + m2[i] + " should be equal()"); + + // Validate that the arrays aren't shared + for (int i = 0; i < m1.length; i++) + m1[i] = null; + + for (int i = 0; i < m2.length; i++) + if (m2[i] == null) + throw new RuntimeException("Detected sharing of AccessibleObject arrays"); + } + + // Validate that annotations are shared + private static void validateAnnotationSharing(AccessibleObject m1, AccessibleObject m2) { + Bar b1 = m1.getAnnotation(Bar.class); + Bar b2 = m2.getAnnotation(Bar.class); + + if (b1 != b2) + throw new RuntimeException(b1 + " and " + b2 + " should be =="); + + } + + // Validate that Method instances representing the annotation elements + // behave as intended + private 
static void validateAnnotationSharingIsSafe(AccessibleObject m1, AccessibleObject m2) + throws Exception { + Bar b1 = m1.getAnnotation(Bar.class); + Bar b2 = m2.getAnnotation(Bar.class); + + Method mm1 = b1.annotationType().getMethod("value", (Class[]) null); + Method mm2 = b2.annotationType().getMethod("value", (Class[]) null); + inner(mm1, mm2); + + mm1 = b1.getClass().getMethod("value", (Class[]) null); + mm2 = b2.getClass().getMethod("value", (Class[]) null); + inner(mm1, mm2); + + } + private static void inner(Method mm1, Method mm2) + throws Exception { + if (!mm1.equals(mm2)) + throw new RuntimeException(mm1 + " and " + mm2 + " should be equal()"); + + mm1.setAccessible(false); + mm2.setAccessible(true); + + if (mm1.isAccessible()) + throw new RuntimeException(mm1 + " should not be accessible"); + + if (!mm2.isAccessible()) + throw new RuntimeException(mm2 + " should be accessible"); + } + + // Validate that array element values are not shared + private static void validateArrayValues(Baz a, Baz b) { + String[] s1 = a.value(); + String[] s2 = b.value(); + + s1[0] = "22"; + + if (!s2[0].equals("1")) + throw new RuntimeException("Mutation of array elements should not be detectable"); + } + + @Foo @Bar("val") @Baz({"1", "2"}) + public void m() { + return ; + } + + @Foo @Bar("someValue") @Baz({"1", "22", "33"}) + public Object f = new Object(); +} + +@Retention(RetentionPolicy.RUNTIME) +@interface Foo {} + +@Retention(RetentionPolicy.RUNTIME) +@interface Bar { + String value(); +} + +@Retention(RetentionPolicy.RUNTIME) +@interface Baz { + String [] value(); +} --- ./jdk/test/java/math/BigDecimal/ZeroScalingTests.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/math/BigDecimal/ZeroScalingTests.java Wed Feb 04 12:14:43 2015 -0800 @@ -23,8 +23,10 @@ /* * @test - * @bug 4902952 4905407 4916149 - * @summary Tests that the scale of zero is propagated properly and has the proper effect. + * @bug 4902952 4905407 4916149 8057793 + * @summary Tests that the scale of zero is propagated properly and has the + * proper effect and that setting the scale to zero does not mutate the + * BigDecimal. * @author Joseph D. 
Darcy */ @@ -445,6 +447,16 @@ return failures; } + static int setScaleDoesNotMutateTest() { + BigDecimal total = new BigDecimal("258815507198903607775511093103396443816569106750031264155319238473795838680758514810110764742309284477206138527975952150289602995045050194333030191178778772026538699925775139201970526695485362661420908248887297829319881475178467494779683293036572059595504702727301324759997409522995072582369210284334718757260859794972695026582432867589093687280300148141501712013226636373167978223780290547640482160818746599330924736802844173226042389174403401903999447463440670236056324929325189403433689" + + ".4261674320657853314448140357997176067457772876068588730459718988623297635446878918476647365235848435443471188366283730414129183745504588847066867307261013388725170216887697828947937340498192229241718427934859197531869933884519090960421279038357653937295477309539421754611460617151087016156151421342822612936567605700615547831957267164033041014697823039573251426384933276923528388067416118876556950299489755096804965739991744020585934542031909634431795326404463528280890168748536348513877625793198532673173205159411059121898387199192592777219948801935416348728821801843034343604123440594355596804948074155732691992033761262422717669396669393166485750657027505027989734189782049723369242547025513506546505735826142115068563838976929114224582869120853395758753248329791408701194556205322723181221036402330691157000207606254938169028062416307882302680316951406879649313779889625072639904682760097509980664429713088663471360229071666253306231303075559149301201504379005105305372586651726198212729370267139777099744349671651595455924827106636399667816782686226202295770093176982541349147420984207923139318437098109054143363837574076754296637142109679247674342030212052703693167977524119746176622008980863353222181916748467951631020215055555084442167087459111943216748875272272002970394717995807443033463540572735407306438420918108994905909141952250875930138343888010184881748550603068040248942927576136181904722341108594364726452037531398206582795593402512269925567443434750869235683656379194794624247945545228655598882400396628995096522213298920347064452534878980444212782830792332268451245254345863246574712869532262554306621258709933752815127132071257207481634986427959604576399546165301639590047700925472973924991373831766096465053510013048407629058262370249823305978050635211622858065412201105249896492563992337927994069950684692719412695118189949541093928395481412623246604722536323823250388368314290456170360151223880702401337608585001327132554078556258379568863493249810039170849228081872232850511444549154411342177430665758635635721521339789054449982090757639509097841481420189923672904858900723031795128811317694147830974541031033478265177017202635418693356311669779650135526479067294085229509961054795254459165011553052200908538912263671849894344532907880683978179278937088377222551152376721941629242609454920126228917703655468312367898679221367478193648338433971651078257734475498853514498993300072006511440039612280912106308073332367187932834277889654790744762882553878249824436331909383027857607544365255865445233391704000531285033373954283938813576695685327221674930961512213810173201473449913314217893797859644408406843630417954105250975649795857739485586518968340673244279008482552650014988903298594442338614783887423930609962367837426547613507638769893630526091072263988583100514978569310936936979811658015390605168952278189253425352612271343640636732855882562803869151638758722313953482935059670577944093797090796857
98908660258077792158532257603211711587587586356431658240229896344639704"); + if (total.setScale(0, RoundingMode.DOWN).equals(total.setScale(0, RoundingMode.DOWN))) { + return 0; + } else { + return 1; + } + } + public static void main(String argv[]) { int failures = 0; @@ -455,6 +467,7 @@ failures += setScaleTests(); failures += toEngineeringStringTests(); failures += ulpTests(); + failures += setScaleDoesNotMutateTest(); if (failures > 0 ) { throw new RuntimeException("Incurred " + failures + " failures" + --- ./jdk/test/java/math/BigInteger/BigIntegerTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/math/BigInteger/BigIntegerTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -71,6 +71,7 @@ static final int BITS_TOOM_COOK_SQUARE = 6912; static final int BITS_SCHOENHAGE_BASE = 640; static final int BITS_BURNIKEL_ZIEGLER = 2560; + static final int BITS_BURNIKEL_ZIEGLER_OFFSET = 1280; static final int ORDER_SMALL = 60; static final int ORDER_MEDIUM = 100; @@ -288,19 +289,19 @@ * where {@code abs(u) > abs(v)} and {@code a > b && b > 0}, then if * {@code w/z = q1*z + r1} and {@code u/v = q2*v + r2}, then * {@code q1 = q2*pow(2,a-b)} and {@code r1 = r2*pow(2,b)}. The test - * ensures that {@code v} is just under the B-Z threshold and that {@code w} - * and {@code z} are both over the threshold. This implies that {@code u/v} - * uses the standard division algorithm and {@code w/z} uses the B-Z - * algorithm. The results of the two algorithms are then compared using the - * observation described in the foregoing and if they are not equal a - * failure is logged. + * ensures that {@code v} is just under the B-Z threshold, that {@code z} is + * over the threshold and {@code w} is much larger than {@code z}. This + * implies that {@code u/v} uses the standard division algorithm and + * {@code w/z} uses the B-Z algorithm. The results of the two algorithms + * are then compared using the observation described in the foregoing and + * if they are not equal a failure is logged. 
*/ public static void divideLarge() { int failCount = 0; - BigInteger base = BigInteger.ONE.shiftLeft(BITS_BURNIKEL_ZIEGLER - 33); + BigInteger base = BigInteger.ONE.shiftLeft(BITS_BURNIKEL_ZIEGLER + BITS_BURNIKEL_ZIEGLER_OFFSET - 33); for (int i=0; i> headers) { + Set keys = headers.keySet(); + for (String key : keys) { + if (key.indexOf(' ') != -1 || key.indexOf('\t') != -1 + || key.indexOf(':') != -1) + { + failed = true; + } + } + return null; + } + + public CacheRequest put(URI uri, URLConnection c) throws IOException { + return null; + } + } + + static int port; + + static String urlstring, redirstring; + + public static void main (String[] args) throws Exception { + Handler handler = new Handler(); + InetSocketAddress addr = new InetSocketAddress (0); + HttpServer server = HttpServer.create (addr, 0); + port = server.getAddress().getPort(); + HttpContext ctx = server.createContext ("/test", handler); + System.out.println ("Server: " + server.getAddress().getPort()); + ResponseCache.setDefault(new Cache()); + + ExecutorService executor = Executors.newCachedThreadPool(); + server.setExecutor (executor); + server.start (); + + urlstring = "http://127.0.0.1:" + Integer.toString(port)+"/test/foo"; + redirstring = urlstring + "/redirect/bar"; + + URL url = new URL (urlstring); + HttpURLConnection urlc = (HttpURLConnection)url.openConnection(); + urlc.addRequestProperty("X-Foo", "bar"); + urlc.setInstanceFollowRedirects(true); + System.out.println(urlc.getResponseCode()); + InputStream i = urlc.getInputStream(); + int count=0; + for (int c=i.read(); c!=-1; c=i.read()) { + //System.out.write(c); + count++; + } + System.out.println("Read " + count); + System.out.println("FINISHED"); + server.stop(0); + executor.shutdownNow(); + if (failed) { + throw new RuntimeException("Test failed"); + } + } + + public static boolean error = false; + public static int count = 0; + + static class Handler implements HttpHandler { + int invocation = 0; + public void handle (HttpExchange t) + throws IOException + { + InputStream is = t.getRequestBody(); + Headers map = t.getRequestHeaders(); + Headers rmap = t.getResponseHeaders(); + invocation ++; + if (invocation == 1) { + rmap.add("Location", redirstring); + while (is.read () != -1) ; + is.close(); + System.out.println ("sending response"); + t.sendResponseHeaders (301, 0); + } else { + byte[] buf = "Hello world".getBytes(); + t.sendResponseHeaders (200, buf.length); + OutputStream os = t.getResponseBody(); + try { + os.write(buf); + } catch (IOException e) { + System.out.println ("EX 1 " + e); + } + } + System.out.println ("Closing"); + t.close(); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/nio/channels/FileChannel/BlockDeviceSize.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + * @bug 8054029 + * @summary Block devices should not report size=0 on Linux + */ + +import java.io.RandomAccessFile; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.channels.FileChannel; +import java.nio.file.AccessDeniedException; +import java.nio.file.NoSuchFileException; +import static java.nio.file.StandardOpenOption.*; + + +public class BlockDeviceSize { + private static final String BLK_FNAME = "/dev/sda1"; + private static final Path BLK_PATH = Paths.get(BLK_FNAME); + + public static void main(String[] args) throws Throwable { + try (FileChannel ch = FileChannel.open(BLK_PATH, READ); + RandomAccessFile file = new RandomAccessFile(BLK_FNAME, "r")) { + + long size1 = ch.size(); + long size2 = file.length(); + if (size1 != size2) { + throw new RuntimeException("size differs when retrieved" + + " in different ways: " + size1 + " != " + size2); + } + System.out.println("OK"); + + } catch (NoSuchFileException nsfe) { + System.err.println("File " + BLK_FNAME + " not found." + + " Skipping test"); + } catch (AccessDeniedException ade) { + System.err.println("Access to " + BLK_FNAME + " is denied." + + " Run test as root."); + } + } +} --- ./jdk/test/java/nio/file/WatchService/Basic.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/nio/file/WatchService/Basic.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +22,7 @@ */ /* @test - * @bug 4313887 6838333 7017446 + * @bug 4313887 6838333 7017446 8011537 8042470 * @summary Unit test for java.nio.file.WatchService * @library .. 
 * @run main Basic
@@ -295,24 +295,31 @@
        // IllegalArgumentException
        System.out.println("IllegalArgumentException tests...");
        try {
-            dir.register(watcher, new WatchEvent.Kind<?>[]{ } );
+            dir.register(watcher /*empty event list*/);
            throw new RuntimeException("IllegalArgumentException not thrown");
        } catch (IllegalArgumentException x) {
        }
        try {
            // OVERFLOW is ignored so this is equivalent to the empty set
-            dir.register(watcher, new WatchEvent.Kind<?>[]{ OVERFLOW });
+            dir.register(watcher, OVERFLOW);
+            throw new RuntimeException("IllegalArgumentException not thrown");
+        } catch (IllegalArgumentException x) {
+        }
+        try {
+            // OVERFLOW is ignored even if specified multiple times
+            dir.register(watcher, OVERFLOW, OVERFLOW);
            throw new RuntimeException("IllegalArgumentException not thrown");
        } catch (IllegalArgumentException x) {
        }

        // UnsupportedOperationException
        try {
-            dir.register(watcher, new WatchEvent.Kind<?>[]{
+            dir.register(watcher, new WatchEvent.Kind<Object>() {
                    @Override public String name() { return "custom"; }
                    @Override public Class<Object> type() { return Object.class; }
-                }});
+                });
+            throw new RuntimeException("UnsupportedOperationException not thrown");
        } catch (UnsupportedOperationException x) {
        }
        try {
@@ -328,7 +335,7 @@
        // NullPointerException
        System.out.println("NullPointerException tests...");
        try {
-            dir.register(null, new WatchEvent.Kind<?>[]{ ENTRY_CREATE });
+            dir.register(null, ENTRY_CREATE);
            throw new RuntimeException("NullPointerException not thrown");
        } catch (NullPointerException x) {
        }
@@ -380,7 +387,7 @@
        try {
            dir.register(watcher, new WatchEvent.Kind<?>[]{ ENTRY_CREATE });
-           throw new RuntimeException("ClosedWatchServiceException not thrown");
+            throw new RuntimeException("ClosedWatchServiceException not thrown");
        } catch (ClosedWatchServiceException x) {
        }
@@ -468,6 +475,28 @@
        }
    }

+    /**
+     * Test that thread interrupted status is preserved upon a call
+     * to register()
+     */
+    static void testThreadInterrupt(Path dir) throws IOException {
+        System.out.println("-- Thread interrupted status test --");
+
+        FileSystem fs = FileSystems.getDefault();
+        Thread curr = Thread.currentThread();
+        try (WatchService watcher = fs.newWatchService()) {
+            System.out.println("interrupting current thread");
+            curr.interrupt();
+            dir.register(watcher, ENTRY_CREATE);
+            if (!curr.isInterrupted())
+                throw new RuntimeException("thread should remain interrupted");
+            System.out.println("current thread is still interrupted");
+            System.out.println("OKAY");
+        } finally {
+            curr.interrupted();
+        }
+    }
+
    public static void main(String[] args) throws IOException {
        Path dir = TestUtil.createTemporaryDirectory();
        try {
@@ -478,6 +507,7 @@
            testWakeup(dir);
            testExceptions(dir);
            testTwoWatchers(dir);
+            testThreadInterrupt(dir);
        } finally {
            TestUtil.removeAll(dir);
--- ./jdk/test/java/security/cert/CertificateFactory/invalidEncodedCerts/DetectInvalidEncoding.java	Mon Dec 08 12:29:42 2014 -0800
+++ ./jdk/test/java/security/cert/CertificateFactory/invalidEncodedCerts/DetectInvalidEncoding.java	Wed Feb 04 12:14:43 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -23,27 +23,548 @@ /** * @test - * @bug 4776466 + * @bug 4776466 8032573 * @summary check that CertificateFactory rejects invalid encoded X.509 certs */ import java.io.*; +import java.util.Collection; +import java.util.List; +import java.util.LinkedList; +import javax.security.auth.x500.X500Principal; +import java.security.GeneralSecurityException; import java.security.cert.*; public class DetectInvalidEncoding { + // Originally found in the test file: + // java/security/cert/CertificateFactory/invalidEncodedCerts/invalidcert.pem + // The first character of the PEM encoding has been changed from "M" to + // "X" to force a failure during decoding. + private static final String INVALID_CERT = + "-----BEGIN CERTIFICATE-----\n" + + "XIICJjCCAdCgAwIBAgIBITANBgkqhkiG9w0BAQQFADCBqTELMAkGA1UEBhMCVVMx\n" + + "EzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFTAT\n" + + "BgNVBAoTDEJFQSBXZWJMb2dpYzERMA8GA1UECxMIU2VjdXJpdHkxIzAhBgNVBAMT\n" + + "GkRlbW8gQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9zdXBw\n" + + "b3J0QGJlYS5jb20wHhcNMDAwNTMwMjEzODAxWhcNMDQwNTEzMjEzODAxWjCBjDEL\n" + + "MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG\n" + + "cmFuY2lzY28xFTATBgNVBAoTDEJFQSBXZWJMb2dpYzEZMBcGA1UEAxMQd2VibG9n\n" + + "aWMuYmVhLmNvbTEeMBwGCSqGSIb3DQEJARYPc3VwcG9ydEBiZWEuY29tMFwwDQYJ\n" + + "KoZIhvcNAQEBBQADSwAwSAJBALdsXEHqKHgs6zj0hU5sXMAUHzoT8kgWXmNkKHXH\n" + + "79qbPh6EfdlriW9G/AbRF/pKrCQu7hhllAxREbqTuSlf2EMCAwEAATANBgkqhkiG\n" + + "9w0BAQQFAANBACgmqflL5m5LNeJGpWx9aIoABCiuDcpw1fFyegsqGX7CBhffcruS\n" + + "1p8h5vkHVbMu1frD1UgGnPlOO/K7Ig/KrsU=\n" + + "-----END CERTIFICATE-----"; + + // Created with keytool: + // keytool -genkeypair -keyalg rsa -keysize 2048 -keystore + // -alias root -sigalg SHA256withRSA -dname "CN=Root, O=SomeCompany" + // -validity 730 -ext bc:critical=ca:true + // -ext ku:critical=keyCertSign,cRLSign + private static final String SINGLE_ROOT_CERT = + "-----BEGIN CERTIFICATE-----\n" + + "MIIDCjCCAfKgAwIBAgIEDUiw+DANBgkqhkiG9w0BAQsFADAlMRQwEgYDVQQKEwtT\n" + + "b21lQ29tcGFueTENMAsGA1UEAxMEUm9vdDAeFw0xNDA4MjgyMTI5MjZaFw0xNjA4\n" + + "MjcyMTI5MjZaMCUxFDASBgNVBAoTC1NvbWVDb21wYW55MQ0wCwYDVQQDEwRSb290\n" + + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0VFecSNdH6CJhPOSG127\n" + + "tuvld4y7GGJ0kQf3Q0b8qgprsXAmn0/bQR+YX7PfS408cFW+q2SWXeY2kC/3chvi\n" + + "2syMsGdUJrDzuMbYsbvKPKyuJ2GJskX3mSbLMJj5Tzhg4qmwbzDTFIJ51yGa1Wmh\n" + + "i2+4PhltqT0TohvSVJlBrOWNhmvwv5UWsF4e2i04rebDZQoWkmD3MpImZXF/HYre\n" + + "9P8NP97vN0xZmh5PySHy2ILXN3ZhTn3tq0YxNSQTaMUfhgoyzWFvZKAnm/tZIh/1\n" + + "oswwEQPIZJ25AUTm9r3YPQXl1hsNdLU0asEVYRsgzGSTX5gCuUY+KzhStzisOcUY\n" + + "uQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV\n" + + "HQ4EFgQUz1FBNixG/KCgcn6FOWzxP1hujG0wDQYJKoZIhvcNAQELBQADggEBAL60\n" + + "ZaNc6eIMbKntGVE/pdxxyKwPdDyAAeEevX23KRWoLbQjHXo5jrfDPhI5k45ztlyU\n" + + "+tIQbc81LlCl88I4dIx0fvEbxjNaAYhFNXwwSQBs2CuEAdRK8hodXbRcEeI+G10F\n" + + "ARIVs2C7JNm/RhxskCWgj6tFIOGaTZ9gHyvlQUEM18sr5fXZlXTqspZCmz3t5XPi\n" + + "5/wYLv6vk7k3G8WzMHbBE0bYI+61cCc8rbMHldtymbwSwiqfKC9y7oPEfRCbzVUe\n" + + "fgrKcOyVWDuw0y0hhsQL/oONjPp4uK/bl9B7T84t4+ihxdocWKx6eyhFvOvZH9t2\n" + + "kUylb9yBUYStwGExMHg=\n" + + "-----END CERTIFICATE-----"; + + // Created with keytool: + // keytool -genkeypair -keyalg rsa -keysize 2048 -keystore + // -alias root -sigalg SHA256withRSA + // -dname "CN=Intermed, O=SomeCompany" -validity 730 + // -ext bc:critical=ca:true -ext ku:critical=keyCertSign,cRLSign + // keytool -certreq -keystore -sigalg SHA256withRSA 
+ // -alias intermed -dname "CN=Intermed, O=SomeCompany" + // keytool -gencert -keystore -alias intermed + // -sigalg SHA256withRSA -validity 730 + // -ext bc:critical=ca:true -ext ku:critical=keyCertSign,cRLSign + private static final String INTERMED_CA_CERT = + "-----BEGIN CERTIFICATE-----\n" + + "MIIDLzCCAhegAwIBAgIEIIgOyDANBgkqhkiG9w0BAQsFADAlMRQwEgYDVQQKEwtT\n" + + "b21lQ29tcGFueTENMAsGA1UEAxMEUm9vdDAeFw0xNDA4MjgyMjUyNDJaFw0xNjA4\n" + + "MDcyMjUyNDJaMCkxFDASBgNVBAoTC1NvbWVDb21wYW55MREwDwYDVQQDEwhJbnRl\n" + + "cm1lZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJEecvTWla8kdWx+\n" + + "HHu5ryfBpJ95I7V4MEajnmzJVZcwvKhDjlDgABDMuVwFEUUSyeOdbWJF3DLKnyMD\n" + + "KTx6/58kuVak3NX2TJ8cmmIlKf1upFbdrEtjYViSnNrApprfO8B3ORdBbO6QDYza\n" + + "IkAWdI5GllFnVkb4yhMUBg3zfhglF+bl3D3lVRlp9bCrUZoNRs+mZjhVbcMn22ej\n" + + "TfG5Y3VpNM4SN8dFIxPQLLk/aao+cmWEQdbQ0R6ydemRukqrw170olSVLeoGGala\n" + + "3D4oJckde8EgNPcghcsdQ6tpGhkpFhmoyzEsuToR7Gq9UT5V2kkqJneiKXqQg4wz\n" + + "vMAlUGECAwEAAaNjMGEwHwYDVR0jBBgwFoAUOw+92bevFoJz96pR1DrAkPPUKb0w\n" + + "DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLbnErBs\n" + + "q/Mhci5XElfjjLZp3GRyMA0GCSqGSIb3DQEBCwUAA4IBAQAq8y2DpkSV31IXZ1vr\n" + + "/Ye+Nj/2NvBydFeHVRGMAN1LJv6/Q42TCSXbr6cDQ4NWQUtPm90yZBYJSznkbShx\n" + + "HOJEE6R8PRJvoUtMm7fJrNtkybTt6jX4j50Lw8gdYB/rgZb4z8ZQZVEo/0zpW4HV\n" + + "Gs+q4z8TkdmLR18hl39sUEsxt99AOBk8NtKKVNfBWq9b0QDhRkXfmqhyeXdDsHOV\n" + + "8ksulsa7hseheHhdjziEOpQugh8qzSea2kFPrLB53VjWfa4qDzEPaNhahho9piCu\n" + + "82XDnOrcEk9KyHWM7sa7vtK7++W+0MXD/p9nkZ6NHrJXweLriU0DXO6ZY3mzNKJK\n" + + "435M\n" + + "-----END CERTIFICATE-----"; + + // Subordinate cert created using keytool, both certs exported to + // files individually, then use openssl to place in a PKCS#7: + // openssl crl2pkcs7 -nocrl -certfile + // -certfile -out + private static final String PKCS7_INTERMED_ROOT_CERTS = + "-----BEGIN PKCS7-----\n" + + "MIIGbgYJKoZIhvcNAQcCoIIGXzCCBlsCAQExADALBgkqhkiG9w0BBwGgggZBMIID\n" + + "LzCCAhegAwIBAgIEIIgOyDANBgkqhkiG9w0BAQsFADAlMRQwEgYDVQQKEwtTb21l\n" + + "Q29tcGFueTENMAsGA1UEAxMEUm9vdDAeFw0xNDA4MjgyMjUyNDJaFw0xNjA4MDcy\n" + + "MjUyNDJaMCkxFDASBgNVBAoTC1NvbWVDb21wYW55MREwDwYDVQQDEwhJbnRlcm1l\n" + + "ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJEecvTWla8kdWx+HHu5\n" + + "ryfBpJ95I7V4MEajnmzJVZcwvKhDjlDgABDMuVwFEUUSyeOdbWJF3DLKnyMDKTx6\n" + + "/58kuVak3NX2TJ8cmmIlKf1upFbdrEtjYViSnNrApprfO8B3ORdBbO6QDYzaIkAW\n" + + "dI5GllFnVkb4yhMUBg3zfhglF+bl3D3lVRlp9bCrUZoNRs+mZjhVbcMn22ejTfG5\n" + + "Y3VpNM4SN8dFIxPQLLk/aao+cmWEQdbQ0R6ydemRukqrw170olSVLeoGGala3D4o\n" + + "Jckde8EgNPcghcsdQ6tpGhkpFhmoyzEsuToR7Gq9UT5V2kkqJneiKXqQg4wzvMAl\n" + + "UGECAwEAAaNjMGEwHwYDVR0jBBgwFoAUOw+92bevFoJz96pR1DrAkPPUKb0wDwYD\n" + + "VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLbnErBsq/Mh\n" + + "ci5XElfjjLZp3GRyMA0GCSqGSIb3DQEBCwUAA4IBAQAq8y2DpkSV31IXZ1vr/Ye+\n" + + "Nj/2NvBydFeHVRGMAN1LJv6/Q42TCSXbr6cDQ4NWQUtPm90yZBYJSznkbShxHOJE\n" + + "E6R8PRJvoUtMm7fJrNtkybTt6jX4j50Lw8gdYB/rgZb4z8ZQZVEo/0zpW4HVGs+q\n" + + "4z8TkdmLR18hl39sUEsxt99AOBk8NtKKVNfBWq9b0QDhRkXfmqhyeXdDsHOV8ksu\n" + + "lsa7hseheHhdjziEOpQugh8qzSea2kFPrLB53VjWfa4qDzEPaNhahho9piCu82XD\n" + + "nOrcEk9KyHWM7sa7vtK7++W+0MXD/p9nkZ6NHrJXweLriU0DXO6ZY3mzNKJK435M\n" + + "MIIDCjCCAfKgAwIBAgIEdffjKTANBgkqhkiG9w0BAQsFADAlMRQwEgYDVQQKEwtT\n" + + "b21lQ29tcGFueTENMAsGA1UEAxMEUm9vdDAeFw0xNDA4MjgyMjQ2MzZaFw0xNjA4\n" + + "MjcyMjQ2MzZaMCUxFDASBgNVBAoTC1NvbWVDb21wYW55MQ0wCwYDVQQDEwRSb290\n" + + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhnXc8Avv54Gk2xjVa2yA\n" + + "lBL/Cug1nyvKl5wqmN+foT6cMOX6bneCkJOJ4lSbch3gvl4ctlX/9hm3pB/+HhSr\n" + + 
"em2NcLQrLEq8l9Ar4RnqfoXQR4Uy+4P6wj9OcVV7e/v/+ZPnStOoEAtb5nAwsR2b\n" + + "hOC/tIFNwflrsmsmtMSoOiNftpYLFF4eOAdpDrXYMrqNu6ZxZsOQ7WZl4SsVOx1N\n" + + "/IINXwBLyoHJDzLZ0iJEV0O6mh846s0n6QXeK1P5d0uLcoZaZ1k8Q4sRcdoLA6rS\n" + + "e1WffipBFMvIuoDIigkHZIKVYRLG828rO+PFnRah0ybybkVsN6s3oLxfhswZDvut\n" + + "OwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV\n" + + "HQ4EFgQUOw+92bevFoJz96pR1DrAkPPUKb0wDQYJKoZIhvcNAQELBQADggEBACBN\n" + + "wEaV70FKKBINHtNwesd7TB6fgSaVgDZOO08aseHbXnm7AUhtDV3P5rQR2AsKtbg4\n" + + "COhlKw2/Ki18D4DfdCccFKFTRZBjqj2PxNmn6C68l1/bT4PuUXuM7rW++53RcOA7\n" + + "TbgLuzA25kSz7XinRvR8L4VwHtppu5tSYEthMIMgLZLGGV9r7kBfpY8lXdxQM8vb\n" + + "xZUIysasvVtVUFPOTV6g2dfn8QCoqLOmxyzTLdXe4M6acP6f7lmhgr3LMqDtB6K9\n" + + "pN+OImr77zNdZ+jTB+5e9a8gAvc5ZfG7Nk5RfwUatYTAFZ6Uggy2cKmIRpXCia18\n" + + "If78mc7goS1+lHkGCs2hADEA\n" + + "-----END PKCS7-----"; + + // Empty PKCS#7 in DER form can be created with openssl: + // openssl crl2pkcs7 -nocrl -outform DER + private static final byte[] PKCS7_BER_EMPTY = { + 48, 39, 6, 9, 42, -122, 72, -122, + -9, 13, 1, 7, 2, -96, 26, 48, + 24, 2, 1, 1, 49, 0, 48, 11, + 6, 9, 42, -122, 72, -122, -9, 13, + 1, 7, 1, -96, 0, -95, 0, 49, + 0 + }; + + private static final String JTEST_ROOT_CRL = + "-----BEGIN X509 CRL-----\n" + + "MIICoTCBigIBATANBgkqhkiG9w0BAQsFADA1MQ4wDAYDVQQKEwVKVGVzdDELMAkG\n" + + "A1UECxMCSVQxFjAUBgNVBAMTDUpUZXN0IFJvb3QgQ0EXDTE0MDkwNDE4NDIyMVqg\n" + + "MDAuMB8GA1UdIwQYMBaAFO6bllCV6kctH77MfqAtefNeRdsmMAsGA1UdFAQEAgIA\n" + + "jjANBgkqhkiG9w0BAQsFAAOCAgEAmp8ihtiRthknDC+VzehmlQw5u8MftMZYQYk5\n" + + "EI04SwyzY9JTL8QHb4u7fXjnZAyN89aYPypI5OSyDsyyGP/JDNsBt2Um/fl0aaCl\n" + + "Z4Np6x+dB9+oIU1XY7y2+uyQUC5MHivQ5ddbGPoAvK/msbugTGAjHvZpM+l0okiV\n" + + "3SofDrii5BSosFEkXfkf2oG9ZLO3YamsFMEZaOj/eWDyGhTyJMGsq2/8NeTF21Tp\n" + + "YkeDcTHqR5KHoYXjOIaS7NjmErm+uDpKH9Lq+JUcYrbUhmjnq5z04EsPF2F2L7Vb\n" + + "THI+awQAUQit16lXGuz7fFRZi2vPyiaRP5n2QT5D+ac1dAs+oWLDJw6Tf2v9KVTe\n" + + "OmW62yd6zQqCwBg+n57UcNu3sv/Sq3t7iRuN0AmWlIhu659POPQv7Np6bEo6dIpp\n" + + "u7Ze6D2KPtM177ETHYlCx2a3g9VEZYKrVhQ2749St0Cp5szVq691jFZAWYOzcfEO\n" + + "XfK1y25pmlBjvhNIIVRlU+T5rjNb8GaleYKVYnKOcv700K32QxFzcPf7nbNKwW99\n" + + "tcaNHFNP+LW/XP8I3CJ8toXLLcOITKVwMA+0GlO5eL7eX5POc+vE9+7IzGuybmU4\n" + + "uslxoLdJ0NSZWpYmf6a6qrJ67cj5i3706H+eBsWQcShfSYreh+TyWQaGk+fkEiUV\n" + + "iy4QdJ0=\n" + + "-----END X509 CRL-----"; + + private static final String JTEST_INTERMED_CRL = + "-----BEGIN X509 CRL-----\n" + + "MIICzzCBuAIBATANBgkqhkiG9w0BAQsFADA/MQ4wDAYDVQQKEwVKVGVzdDELMAkG\n" + + "A1UECxMCSVQxIDAeBgNVBAMTF0pUZXN0IEludGVybWVkaWF0ZSBDQSAxFw0xNDA5\n" + + "MDQyMjE2NTRaMCIwIAIBBhcNMTQwOTA0MjIxNjU0WjAMMAoGA1UdFQQDCgEFoDAw\n" + + "LjAfBgNVHSMEGDAWgBSvRdjbkSMJ3A7s5H6EWghQ+lkw/zALBgNVHRQEBAICAJsw\n" + + "DQYJKoZIhvcNAQELBQADggIBALJmikMwil8oywhenoO8o9xxCOIU0xrt3KdfiSXw\n" + + "8MtQXZHT9d1C6tlLAsYkWAfmfTvM2OU6wquFCLLsFmDZszbbCqmn4JhYBSKQMqlm\n" + + "IHnsiOFPvITW2FU08fWNLM+FtQzPnTFmx/CJo+wfGpq5tZMIbsccsCJ5uvZVAWGh\n" + + "0KbPmYcJG/O384+kzr/2H2IaoZoMMABec5c5FEF/tpp8jawzY+0VFyaVrumKWdan\n" + + "+3OvRQxT1wLxfNi2vdxB2rmNPo423qanXZAoVv260um3LYlmXBNK1jwQ9lp78jkT\n" + + "B7zMVa4hOUWVxdWc/LE6fUYgPsNqZd+hWy/PolIRp5TS21B5hkc5K87LT59GkexK\n" + + "vNVKQennOLGtH+Q7htK4UeY4Gm/W7UydOQ0k7hZzyfMDkCfLfNfK0l63qKwUku36\n" + + "UdeI1LXqulPEvb/d7rRAAM9p5Sm+RsECj2bcrZBMdIGXcSo26A5tzZpTEC79i4S1\n" + + "yxYIooeBnouUkDJ9+VBsJTSKY5fpU8JSkQPRyHKt+trGAkBt2Ka5MqrHtITzQ1vP\n" + + "5q4tNr45JGEXllH83NlBpWURfsdtkDHa3lxTD/pkrywOCyzz7wQ22D8Kul7EN8nT\n" + + "7LDbN+O3G9GHICxvWlJHp6HMsqGTuH1MIUR+5uZFOJa1S0IzorUIEieLncDUPgzO\n" + + "M4JA\n" + + "-----END X509 
CRL-----"; + + // PKCS#7 CRL Set containing JTEST root and intermediate CRLs + private static final String PKCS7_CRL_SET = + "-----BEGIN PKCS7-----\n" + + "MIIFpQYJKoZIhvcNAQcCoIIFljCCBZICAQExADALBgkqhkiG9w0BBwGgAKGCBXgw\n" + + "ggKhMIGKAgEBMA0GCSqGSIb3DQEBCwUAMDUxDjAMBgNVBAoTBUpUZXN0MQswCQYD\n" + + "VQQLEwJJVDEWMBQGA1UEAxMNSlRlc3QgUm9vdCBDQRcNMTQwOTA0MTg0MjIxWqAw\n" + + "MC4wHwYDVR0jBBgwFoAU7puWUJXqRy0fvsx+oC15815F2yYwCwYDVR0UBAQCAgCO\n" + + "MA0GCSqGSIb3DQEBCwUAA4ICAQCanyKG2JG2GScML5XN6GaVDDm7wx+0xlhBiTkQ\n" + + "jThLDLNj0lMvxAdvi7t9eOdkDI3z1pg/Kkjk5LIOzLIY/8kM2wG3ZSb9+XRpoKVn\n" + + "g2nrH50H36ghTVdjvLb67JBQLkweK9Dl11sY+gC8r+axu6BMYCMe9mkz6XSiSJXd\n" + + "Kh8OuKLkFKiwUSRd+R/agb1ks7dhqawUwRlo6P95YPIaFPIkwayrb/w15MXbVOli\n" + + "R4NxMepHkoehheM4hpLs2OYSub64Okof0ur4lRxittSGaOernPTgSw8XYXYvtVtM\n" + + "cj5rBABRCK3XqVca7Pt8VFmLa8/KJpE/mfZBPkP5pzV0Cz6hYsMnDpN/a/0pVN46\n" + + "ZbrbJ3rNCoLAGD6fntRw27ey/9Kre3uJG43QCZaUiG7rn0849C/s2npsSjp0imm7\n" + + "tl7oPYo+0zXvsRMdiULHZreD1URlgqtWFDbvj1K3QKnmzNWrr3WMVkBZg7Nx8Q5d\n" + + "8rXLbmmaUGO+E0ghVGVT5PmuM1vwZqV5gpVico5y/vTQrfZDEXNw9/uds0rBb321\n" + + "xo0cU0/4tb9c/wjcIny2hcstw4hMpXAwD7QaU7l4vt5fk85z68T37sjMa7JuZTi6\n" + + "yXGgt0nQ1JlaliZ/prqqsnrtyPmLfvTof54GxZBxKF9Jit6H5PJZBoaT5+QSJRWL\n" + + "LhB0nTCCAs8wgbgCAQEwDQYJKoZIhvcNAQELBQAwPzEOMAwGA1UEChMFSlRlc3Qx\n" + + "CzAJBgNVBAsTAklUMSAwHgYDVQQDExdKVGVzdCBJbnRlcm1lZGlhdGUgQ0EgMRcN\n" + + "MTQwOTA0MjIxNjU0WjAiMCACAQYXDTE0MDkwNDIyMTY1NFowDDAKBgNVHRUEAwoB\n" + + "BaAwMC4wHwYDVR0jBBgwFoAUr0XY25EjCdwO7OR+hFoIUPpZMP8wCwYDVR0UBAQC\n" + + "AgCbMA0GCSqGSIb3DQEBCwUAA4ICAQCyZopDMIpfKMsIXp6DvKPccQjiFNMa7dyn\n" + + "X4kl8PDLUF2R0/XdQurZSwLGJFgH5n07zNjlOsKrhQiy7BZg2bM22wqpp+CYWAUi\n" + + "kDKpZiB57IjhT7yE1thVNPH1jSzPhbUMz50xZsfwiaPsHxqaubWTCG7HHLAiebr2\n" + + "VQFhodCmz5mHCRvzt/OPpM6/9h9iGqGaDDAAXnOXORRBf7aafI2sM2PtFRcmla7p\n" + + "ilnWp/tzr0UMU9cC8XzYtr3cQdq5jT6ONt6mp12QKFb9utLpty2JZlwTStY8EPZa\n" + + "e/I5Ewe8zFWuITlFlcXVnPyxOn1GID7DamXfoVsvz6JSEaeU0ttQeYZHOSvOy0+f\n" + + "RpHsSrzVSkHp5zixrR/kO4bSuFHmOBpv1u1MnTkNJO4Wc8nzA5Any3zXytJet6is\n" + + "FJLt+lHXiNS16rpTxL2/3e60QADPaeUpvkbBAo9m3K2QTHSBl3EqNugObc2aUxAu\n" + + "/YuEtcsWCKKHgZ6LlJAyfflQbCU0imOX6VPCUpED0chyrfraxgJAbdimuTKqx7SE\n" + + "80Nbz+auLTa+OSRhF5ZR/NzZQaVlEX7HbZAx2t5cUw/6ZK8sDgss8+8ENtg/Crpe\n" + + "xDfJ0+yw2zfjtxvRhyAsb1pSR6ehzLKhk7h9TCFEfubmRTiWtUtCM6K1CBIni53A\n" + + "1D4MzjOCQDEA\n" + + "-----END PKCS7-----"; + public static void main(String[] args) throws Exception { CertificateFactory cf = CertificateFactory.getInstance("X.509"); - File f = new File - (System.getProperty("test.src", "."), "invalidcert.pem"); - InputStream inStream = new FileInputStream(f); - try { - X509Certificate cert = - (X509Certificate) cf.generateCertificate(inStream); - } catch (CertificateParsingException ce) { - return; + List validTests = new LinkedList<>(); + List invalidTests = new LinkedList<>(); + + // Load up positive test cases (for sanity checks) + StringBuilder sb = new StringBuilder(); + + validTests.add(new GenMultiCertTest("Single, valid certificate", + SINGLE_ROOT_CERT.getBytes(), null, + new X500Principal("CN=Root, O=SomeCompany"))); + validTests.add(new GenMultiCertTest("PEM-encoded PKCS#7 chain", + PKCS7_INTERMED_ROOT_CERTS.getBytes(), null, + new X500Principal("CN=Intermed, O=SomeCompany"), + new X500Principal("CN=Root, O=SomeCompany"))); + validTests.add(new GenMultiCertTest("Two PEM-encoded X509 certs", + (INTERMED_CA_CERT + "\n" + SINGLE_ROOT_CERT).getBytes(), + null, + new X500Principal("CN=Intermed, O=SomeCompany"), + new X500Principal("CN=Root, 
O=SomeCompany"))); + validTests.add(new GenMultiCertTest("Empty data", new byte[0], null)); + + sb.append("Certificate 1: CN=Root, O=SomeCompany\n"); + sb.append(SINGLE_ROOT_CERT).append("\n"); + sb.append("Certificate 2: CN=Intermed, O=SomeCompany\n"); + sb.append(INTERMED_CA_CERT).append("\n"); + sb.append("Extra trailing data\n"); + validTests.add(new GenMultiCertTest( + "Two PEM-encoded certs with leading/trailing " + + "text data around each.", sb.toString().getBytes(), null, + new X500Principal("CN=Root, O=SomeCompany"), + new X500Principal("CN=Intermed, O=SomeCompany"))); + validTests.add(new GenMultiCertTest( + "BER-encoded PKCS#7 with empty certificates segment", + PKCS7_BER_EMPTY, null)); + validTests.add(new GenMultiCRLTest( + "CRL with leading and trailing text data", + ("This is a CRL\n" + JTEST_ROOT_CRL + + "\nSee? Told you so\n\n").getBytes(), null, + new X500Principal("CN=JTest Root CA,OU=IT,O=JTest"))); + validTests.add(new GenMultiCRLTest( + "Two CRLs, one after the other with leading/trailing text", + ("This is a CRL\n" + JTEST_ROOT_CRL + + "\nAnd this is another CRL\n" + JTEST_INTERMED_CRL + + "\nAnd this is trailing text\n").getBytes(), null, + new X500Principal("CN=JTest Root CA,OU=IT,O=JTest"), + new X500Principal( + "CN=JTest Intermediate CA 1,OU=IT,O=JTest"))); + validTests.add(new GenMultiCRLTest("Two CRLs in a PKCS#7 CRL set", + PKCS7_CRL_SET.getBytes(), null, + new X500Principal("CN=JTest Root CA,OU=IT,O=JTest"), + new X500Principal("CN=JTest Intermediate CA 1,OU=IT,O=JTest"))); + + // Load up all test cases where we expect failures + invalidTests.add(new GenSingleCertTest("Invalid PEM encoding", + INVALID_CERT.getBytes(), + new CertificateParsingException())); + invalidTests.add(new GenMultiCertTest("Invalid PEM encoding", + INVALID_CERT.getBytes(), + new CertificateParsingException())); + invalidTests.add(new GenMultiCertTest( + "Two cert sequence, one valid and one invalid", + (INTERMED_CA_CERT + "\n" + INVALID_CERT).getBytes(), + new CertificateParsingException())); + invalidTests.add(new GenMultiCertTest("Non-certificate text", + "This is not a certificate".getBytes(), + new CertificateException())); + invalidTests.add(new GenMultiCertTest( + "Non-certificate text with partial PEM header (4 hyphens)", + "----This is not a valid x509 certificate".getBytes(), + new CertificateException())); + invalidTests.add(new GenMultiCertTest( + "Leading non-certificate text plus valid PEM header, " + + "but not on new line", + "This is not valid -----BEGIN CERTIFICATE-----".getBytes(), + new CertificateException())); + byte[] emptyCString = {0}; + invalidTests.add(new GenMultiCertTest("Empty C-style string", + emptyCString, new CertificateException())); + invalidTests.add(new GenMultiCRLTest("Non-CRL text", + "This is not a CRL".getBytes(), new CRLException())); + invalidTests.add(new GenMultiCRLTest("Valid headers, but not a CRL", + INTERMED_CA_CERT.getBytes(), new CRLException())); + + System.out.println("===== Valid Tests ====="); + for (DecodeTest dt : validTests) { + dt.passTest(); } - throw new Exception("CertificateFactory.generateCertificate() did not " - + "throw CertificateParsingException on invalid X.509 cert data"); + System.out.print("\n"); + + System.out.println("===== Invalid Tests ====="); + for (DecodeTest dt : invalidTests) { + dt.failTest(); + } + } + + public static abstract class DecodeTest { + protected String testName; + protected byte[] testData; + protected Throwable expectedException; + protected X500Principal[] principals; + protected 
CertificateFactory cf;
+
+        /**
+         * Construct a DecodeTest
+         *
+         * @param name The test name
+         * @param input A byte array consisting of the input for this test
+         * @param failType An exception whose class should match the expected
+         *      exception that will be thrown when this test is run
+         * @param princs Zero or more X500Principals which will be used
+         *      to compare the output in a success case.
+         */
+        DecodeTest(String name, byte[] input, Throwable failType,
+                X500Principal... princs) throws CertificateException {
+            testName = name;
+            testData = input.clone();
+            expectedException = failType;
+            principals = princs;
+            cf = CertificateFactory.getInstance("X.509");
+        }
+
+        public abstract void passTest() throws GeneralSecurityException;
+
+        public abstract void failTest() throws GeneralSecurityException;
+    }
+
+    public static class GenMultiCertTest extends DecodeTest {
+        public GenMultiCertTest(String name, byte[] input, Throwable failType,
+                X500Principal... princs) throws CertificateException {
+            super(name, input, failType, princs);
+        }
+
+        @Override
+        public void passTest() throws GeneralSecurityException {
+            Collection<? extends Certificate> certs;
+
+            System.out.println("generateCertificates(): " + testName);
+            certs = cf.generateCertificates(new ByteArrayInputStream(testData));
+
+            // Walk the certs Collection and do a comparison of subject names
+            int i = 0;
+            if (certs.size() == principals.length) {
+                for (Certificate crt : certs) {
+                    X509Certificate xc = (X509Certificate)crt;
+                    if (!xc.getSubjectX500Principal().equals(
+                            principals[i])) {
+                        throw new RuntimeException("Name mismatch: " +
+                                "cert: " + xc.getSubjectX500Principal() +
+                                ", expected: " + principals[i]);
+                    }
+                    i++;
+                }
+            } else {
+                throw new RuntimeException("Size mismatch: certs = " +
+                        certs.size() + ", expected = " +
+                        principals.length);
+            }
+        }
+
+        @Override
+        public void failTest() throws GeneralSecurityException {
+            Throwable caughtException = null;
+            Collection<? extends Certificate> certs = null;
+
+            System.out.println("generateCertificates(): " + testName);
+            if (expectedException == null) {
+                throw new RuntimeException("failTest requires non-null " +
+                        "expectedException");
+            }
+
+            try {
+                certs =
+                    cf.generateCertificates(new ByteArrayInputStream(testData));
+            } catch (CertificateException ce) {
+                caughtException = ce;
+            }
+
+            if (caughtException != null) {
+                // It has to be the right kind of exception though...
+                if (!caughtException.getClass().equals(
+                        expectedException.getClass())) {
+                    System.err.println("Unexpected exception thrown. " +
+                            "Received: " + caughtException + ", Expected: " +
+                            expectedException.getClass());
+                    throw new RuntimeException(caughtException);
+                }
+            } else {
+                // For a failure test, we'd expect some kind of exception
+                // to be thrown.
+                throw new RuntimeException("Failed to catch expected " +
+                        "exception " + expectedException.getClass());
+            }
+        }
+    }
+
+    public static class GenSingleCertTest extends DecodeTest {
+        public GenSingleCertTest(String name, byte[] input, Throwable failType,
+                X500Principal... princs) throws CertificateException {
+            super(name, input, failType, princs);
+        }
+
+        @Override
+        public void passTest() throws GeneralSecurityException {
+            X509Certificate cert;
+
+            System.out.println("generateCertificate(): " + testName);
+            cert = (X509Certificate)cf.generateCertificate(
+                    new ByteArrayInputStream(testData));
+
+            // Compare the cert's subject name against the expected value
+            // provided in the test. If multiple X500Principals were provided
+            // just use the first one as the expected value.
+            if (!cert.getSubjectX500Principal().equals(principals[0])) {
+                throw new RuntimeException("Name mismatch: " +
+                        "cert: " + cert.getSubjectX500Principal() +
+                        ", expected: " + principals[0]);
+            }
+        }
+
+        @Override
+        public void failTest() throws GeneralSecurityException {
+            Throwable caughtException = null;
+            X509Certificate cert = null;
+            System.out.println("generateCertificate(): " + testName);
+
+            if (expectedException == null) {
+                throw new RuntimeException("failTest requires non-null " +
+                        "expectedException");
+            }
+
+            try {
+                cert = (X509Certificate)cf.generateCertificate(
+                        new ByteArrayInputStream(testData));
+            } catch (CertificateException e) {
+                caughtException = e;
+            }
+
+            if (caughtException != null) {
+                // It has to be the right kind of exception though...
+                if (!caughtException.getClass().equals(
+                        expectedException.getClass())) {
+                    System.err.println("Unexpected exception thrown. " +
+                            "Received: " + caughtException + ", Expected: " +
+                            expectedException.getClass());
+                    throw new RuntimeException(caughtException);
+                }
+            } else {
+                // For a failure test, we'd expect some kind of exception
+                // to be thrown.
+                throw new RuntimeException("Failed to catch expected " +
+                        "exception " + expectedException.getClass());
+            }
+        }
+    }
+
+    public static class GenMultiCRLTest extends DecodeTest {
+        public GenMultiCRLTest(String name, byte[] input, Throwable failType,
+                X500Principal... princs) throws CertificateException {
+            super(name, input, failType, princs);
+        }
+
+        @Override
+        public void passTest() throws GeneralSecurityException {
+            Collection<? extends CRL> crls;
+
+            System.out.println("generateCRLs(): " + testName);
+            crls = cf.generateCRLs(new ByteArrayInputStream(testData));
+
+            // Walk the crls Collection and do a comparison of issuer names
+            int i = 0;
+            if (crls.size() == principals.length) {
+                for (CRL revlist : crls) {
+                    X509CRL xc = (X509CRL)revlist;
+                    if (!xc.getIssuerX500Principal().equals(principals[i])) {
+                        throw new RuntimeException("Name mismatch: " +
+                                "CRL: " + xc.getIssuerX500Principal() +
+                                ", expected: " + principals[i]);
+                    }
+                    i++;
+                }
+            } else {
+                throw new RuntimeException("Size mismatch: crls = " +
+                        crls.size() + ", expected = " +
+                        principals.length);
+            }
+        }
+
+        @Override
+        public void failTest() throws GeneralSecurityException {
+            Throwable caughtException = null;
+            Collection<? extends CRL> crls = null;
+
+            System.out.println("generateCRLs(): " + testName);
+            if (expectedException == null) {
+                throw new RuntimeException("failTest requires non-null " +
+                        "expectedException");
+            }
+
+            try {
+                crls =
+                    cf.generateCRLs(new ByteArrayInputStream(testData));
+            } catch (CRLException e) {
+                caughtException = e;
+            }
+
+            if (caughtException != null) {
+                // It has to be the right kind of exception though...
+                if (!caughtException.getClass().equals(
+                        expectedException.getClass())) {
+                    System.err.println("Unexpected exception thrown. " +
+                            "Received: " + caughtException + ", Expected: " +
+                            expectedException.getClass());
+                    throw new RuntimeException(caughtException);
+                }
+            } else {
+                // For a failure test, we'd expect some kind of exception
+                // to be thrown.
+ throw new RuntimeException("Failed to catch expected " + + "exception " + expectedException.getClass()); + } + } } } --- ./jdk/test/java/security/cert/CertificateFactory/invalidEncodedCerts/invalidcert.pem Mon Dec 08 12:29:42 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -XIICJjCCAdCgAwIBAgIBITANBgkqhkiG9w0BAQQFADCBqTELMAkGA1UEBhMCVVMx -EzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFTAT -BgNVBAoTDEJFQSBXZWJMb2dpYzERMA8GA1UECxMIU2VjdXJpdHkxIzAhBgNVBAMT -GkRlbW8gQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9zdXBw -b3J0QGJlYS5jb20wHhcNMDAwNTMwMjEzODAxWhcNMDQwNTEzMjEzODAxWjCBjDEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFTATBgNVBAoTDEJFQSBXZWJMb2dpYzEZMBcGA1UEAxMQd2VibG9n -aWMuYmVhLmNvbTEeMBwGCSqGSIb3DQEJARYPc3VwcG9ydEBiZWEuY29tMFwwDQYJ -KoZIhvcNAQEBBQADSwAwSAJBALdsXEHqKHgs6zj0hU5sXMAUHzoT8kgWXmNkKHXH -79qbPh6EfdlriW9G/AbRF/pKrCQu7hhllAxREbqTuSlf2EMCAwEAATANBgkqhkiG -9w0BAQQFAANBACgmqflL5m5LNeJGpWx9aIoABCiuDcpw1fFyegsqGX7CBhffcruS -1p8h5vkHVbMu1frD1UgGnPlOO/K7Ig/KrsU= ------END CERTIFICATE----- --- ./jdk/test/java/text/Format/DecimalFormat/TieRoundingTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/text/Format/DecimalFormat/TieRoundingTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* @test * - * @bug 7131459 + * @bug 7131459 8039915 * @summary test various situations of NumberFormat rounding when close to tie * @author Olivier Lagneau * @run main TieRoundingTest @@ -56,7 +56,7 @@ if (!result.equals(expectedOutput)) { System.out.println(); System.out.println("========================================"); - System.out.println("***Error formatting double value from string : " + + System.out.println("***Failure : error formatting value from string : " + inputDigits); System.out.println("NumberFormat pattern is : " + ((DecimalFormat ) nf).toPattern()); @@ -103,7 +103,7 @@ if (!result.equals(expectedOutput)) { System.out.println(); System.out.println("========================================"); - System.out.println("***Error formatting double value from string : " + + System.out.println("***Failure : error formatting value from string : " + inputDigits); System.out.println("NumberFormat pattern is : " + ((DecimalFormat ) nf).toPattern()); @@ -144,7 +144,7 @@ if (!result.equals(expectedOutput)) { System.out.println(); System.out.println("========================================"); - System.out.println("***Error formatting number value from string : " + + System.out.println("***Failure : error formatting value from string : " + inputDigits); System.out.println("NumberFormat pattern is : " + ((DecimalFormat ) nf).toPattern()); @@ -174,7 +174,7 @@ public static void main(String[] args) { - // Only the 3 rounding modes below may be impacted by bug 7131459. + // The 3 HALF_* rounding modes are impacted by bugs 7131459, 8039915. // So we do not test the other rounding modes. RoundingMode[] roundingModes = { RoundingMode.HALF_DOWN, @@ -183,10 +183,14 @@ }; // Precise the relative position of input value against its closest tie. + // The double values tested below for 3 and 5 fractional digits must follow + // this scheme (position toward tie). 
String[] tieRelativePositions = { "below", "exact", "above", "below", "exact", "above", "below", "exact", "above", + "below", "above", "above", + "below", "below", "above", "below", "exact", "above" }; @@ -196,9 +200,13 @@ double[] values3FractDigits = { // unimpacting values close to tie, with less than 3 input fract digits 1.115d, 1.125d, 1.135d, - // impacting close to tie values covering all 6 cases + // HALF_* impacting close to tie values covering all 6 tie cases 0.3115d, 0.3125d, 0.3135d, 0.6865d, 0.6875d, 0.6885d, + // specific HALF_UP close to tie values + 0.3124d, 0.3126d, 0.3128d, + // specific HALF_DOWN close to tie values + 0.6864d, 0.6865d, 0.6868d, // unimpacting values close to tie, with more than 3 input fract digits 1.46885d, 2.46875d, 1.46865d }; @@ -207,6 +215,8 @@ "1.115d", "1.125d", "1.135d", "0.3115d", "0.3125d", "0.3135d", "0.6865d", "0.6875d", "0.6885d", + "0.3124d", "0.3126d", "0.3128d", + "0.6864d", "0.6865d", "0.6868d", "1.46885d", "2.46875d", "1.46865d" }; @@ -214,16 +224,22 @@ {"1.115", "1.125", "1.135", "0.311", "0.312", "0.314", "0.686", "0.687", "0.689", + "0.312", "0.313", "0.313", + "0.686", "0.686", "0.687", "1.469", "2.469", "1.469" }, {"1.115", "1.125", "1.135", "0.311", "0.312", "0.314", "0.686", "0.688", "0.689", + "0.312", "0.313", "0.313", + "0.686", "0.686", "0.687", "1.469", "2.469", "1.469" }, {"1.115", "1.125", "1.135", "0.311", "0.313", "0.314", "0.686", "0.688", "0.689", + "0.312", "0.313", "0.313", + "0.686", "0.686", "0.687", "1.469", "2.469", "1.469" }, }; @@ -250,9 +266,13 @@ double[] values5FractDigits = { // unimpacting values close to tie, with less than 5 input fract digits 1.3135d, 1.3125d, 1.3115d, - // impacting values close to tie, covering all 6 cases + // HALF_* impacting values close to tie, covering all 6 cases 1.328115d, 1.328125d, 1.328135d, 1.796865d, 1.796875d, 1.796885d, + // specific HALF_UP close to tie values + 1.328124d, 1.798876d, 1.796889d, + // specific HALF_DOWN close to tie values + 1.328114d, 1.796865d, 1.328138d, // unimpacting values close to tie, with more than 5 input fract digits 1.3281149999999d, 1.75390625d, 1.7968750000001d }; @@ -261,6 +281,8 @@ "1.3135d", "1.3125d", "1.3115d", "1.328115d", "1.328125d", "1.328135d", "1.796865d", "1.796875d", "1.796885d", + "1.328124d", "1.798876d", "1.796889d", + "1.328114d", "1.796865d", "1.328138d", "1.3281149999999d", "1.75390625d", "1.7968750000001d" }; @@ -268,16 +290,22 @@ {"1.3135", "1.3125", "1.3115", "1.32811", "1.32812", "1.32814", "1.79686", "1.79687", "1.79689", + "1.32812", "1.79888", "1.79689", + "1.32811", "1.79686", "1.32814", "1.32811", "1.75391", "1.79688" }, {"1.3135", "1.3125", "1.3115", "1.32811", "1.32812", "1.32814", "1.79686", "1.79688", "1.79689", + "1.32812", "1.79888", "1.79689", + "1.32811", "1.79686", "1.32814", "1.32811", "1.75391", "1.79688" }, {"1.3135", "1.3125", "1.3115", "1.32811", "1.32813", "1.32814", "1.79686", "1.79688", "1.79689", + "1.32812", "1.79888", "1.79689", + "1.32811", "1.79686", "1.32814", "1.32811", "1.75391", "1.79688" } }; --- ./jdk/test/java/util/BitSet/BSMethods.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/util/BitSet/BSMethods.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,10 @@

 /* @test
  * @bug 4098239 4107540 4080736 4261102 4274710 4305272
- *      4979017 4979028 4979031 5030267 6222207
+ *      4979017 4979028 4979031 5030267 6222207 8040806
  * @summary Test the operation of the methods of BitSet class
  * @author Mike McCloskey, Martin Buchholz
+ * @run main/othervm BSMethods
  */

import java.util.*;
@@ -897,6 +898,21 @@
    private static void testToString() {
        check(new BitSet().toString().equals("{}"));
        check(makeSet(2,3,42,43,234).toString().equals("{2, 3, 42, 43, 234}"));
+
+        final long MB = 1024*1024;
+        if (Runtime.getRuntime().maxMemory() >= 512*MB) {
+            // only run it if we have enough memory
+            try {
+                check(makeSet(Integer.MAX_VALUE-1).toString().equals(
+                        "{" + (Integer.MAX_VALUE-1) + "}"));
+                check(makeSet(Integer.MAX_VALUE).toString().equals(
+                        "{" + Integer.MAX_VALUE + "}"));
+                check(makeSet(0, 1, Integer.MAX_VALUE-1, Integer.MAX_VALUE).toString().equals(
+                        "{0, 1, " + (Integer.MAX_VALUE-1) + ", " + Integer.MAX_VALUE + "}"));
+            } catch (IndexOutOfBoundsException exc) {
+                fail("toString() with indices near MAX_VALUE");
+            }
+        }
    }

    private static void testLogicalIdentities() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/util/Collections/SyncSubMutexes.java	Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8048209
+ * @summary Check that Collections.synchronizedNavigableSet().tailSet() is using
+ *          the same lock object as its source.
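+ *
+ *          (Illustration: for s = Collections.synchronizedSortedSet(new TreeSet<>()),
+ *          the client-side locking idiom
+ *              synchronized (s) { for (String e : s.tailSet("x")) { ... } }
+ *          is only safe if the tailSet view locks on the same mutex as s.)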
+ * @run testng SyncSubMutexes + */ +import java.lang.reflect.Field; +import java.util.*; +import java.util.Set; +import java.util.Arrays; + +import org.testng.annotations.Test; +import org.testng.annotations.DataProvider; +import static org.testng.Assert.assertSame; + +public class SyncSubMutexes { + + @Test(dataProvider = "Collections") + public void testCollections(Collection instance) { + // nothing to test, no subset methods + } + + @Test(dataProvider = "Lists") + public void testLists(List instance) { + assertSame(getSyncCollectionMutex(instance.subList(0, 1)), getSyncCollectionMutex(instance)); + } + + @Test(dataProvider = "Sets") + public void testSets(Set instance) { + // nothing to test, no subset methods + + } + + @Test(dataProvider = "SortedSets") + public void testSortedSets(SortedSet instance) { + assertSame(getSyncCollectionMutex(instance.headSet("Echo")), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.tailSet("Charlie")), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.subSet("Charlie", "Echo")), getSyncCollectionMutex(instance)); + + } + + @Test(dataProvider = "NavigableSets") + public void testNavigableSets(NavigableSet instance) { + assertSame(getSyncCollectionMutex(instance.descendingSet()), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.headSet("Echo")), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.headSet("Echo", true)), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.tailSet("Charlie")), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.tailSet("Charlie", true)), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.subSet("Charlie", "Echo")), getSyncCollectionMutex(instance)); + assertSame(getSyncCollectionMutex(instance.subSet("Charlie", true, "Echo", true)), getSyncCollectionMutex(instance)); + } + + @Test(dataProvider = "Maps") + public void testMaps(Map instance) { + assertSame(getSyncCollectionMutex(instance.entrySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.keySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.values()), getSyncMapMutex(instance)); + } + + @Test(dataProvider = "SortedMaps") + public void testSortedMaps(SortedMap instance) { + assertSame(getSyncCollectionMutex(instance.entrySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.keySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.values()), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.headMap("Echo")), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.tailMap("Charlie")), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.subMap("Charlie", "Echo")), getSyncMapMutex(instance)); + } + + @Test(dataProvider = "NavigableMaps") + public void testNavigableMaps(NavigableMap instance) { + assertSame(getSyncMapMutex(instance.descendingMap()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.entrySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.keySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.descendingKeySet()), getSyncMapMutex(instance)); + assertSame(getSyncCollectionMutex(instance.values()), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.headMap("Echo")), getSyncMapMutex(instance)); + 
assertSame(getSyncMapMutex(instance.headMap("Echo", true)), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.tailMap("Charlie")), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.tailMap("Charlie", true)), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.subMap("Charlie", true, "Echo", true)), getSyncMapMutex(instance)); + assertSame(getSyncMapMutex(instance.subMap("Charlie", true, "Echo", true)), getSyncMapMutex(instance)); + } + + @DataProvider(name = "Collections", parallel = true) + public static Iterator collectionProvider() { + return makeCollections().iterator(); + } + + @DataProvider(name = "Lists", parallel = true) + public static Iterator listProvider() { + return makeLists().iterator(); + } + + @DataProvider(name = "Sets", parallel = true) + public static Iterator setProvider() { + return makeSets().iterator(); + } + + @DataProvider(name = "SortedSets", parallel = true) + public static Iterator sortedsetProvider() { + return makeSortedSets().iterator(); + } + + @DataProvider(name = "NavigableSets", parallel = true) + public static Iterator navigablesetProvider() { + return makeNavigableSets().iterator(); + } + + @DataProvider(name = "Maps", parallel = true) + public static Iterator mapProvider() { + return makeMaps().iterator(); + } + + @DataProvider(name = "SortedMaps", parallel = true) + public static Iterator sortedmapProvider() { + return makeSortedMaps().iterator(); + } + + @DataProvider(name = "NavigableMaps", parallel = true) + public static Iterator navigablemapProvider() { + return makeNavigableMaps().iterator(); + } + + private static final Collection BASE_COLLECTION = Collections.unmodifiableCollection( + Arrays.asList("Alpha", "Bravo", "Charlie", "Delta", "Echo", "Foxtrot", "Golf") + ); + private static final Map BASE_MAP; + + static { + Map map = new HashMap<>(); + for(String each : BASE_COLLECTION) { + map.put(each, "*" + each + "*"); + } + BASE_MAP = Collections.unmodifiableMap(map); + } + + public static Collection makeCollections() { + Collection instances = new ArrayList<>(); + instances.add(new Object[] {Collections.synchronizedCollection(new ArrayList<>(BASE_COLLECTION))}); + instances.addAll(makeLists()); + + return instances; + } + + public static Collection makeLists() { + Collection instances = new ArrayList<>(); + instances.add(new Object[] {Collections.synchronizedList(new ArrayList<>(BASE_COLLECTION))}); + instances.add(new Object[] {Collections.synchronizedList(new ArrayList<>(BASE_COLLECTION)).subList(1, 2)}); + + return instances; + } + + public static Collection makeSets() { + Collection instances = new ArrayList<>(); + + instances.add(new Object[] {Collections.synchronizedSet(new TreeSet<>(BASE_COLLECTION))}); + instances.addAll(makeSortedSets()); + return instances; + } + + public static Collection makeSortedSets() { + Collection instances = new ArrayList<>(); + instances.add(new Object[] {Collections.synchronizedSortedSet(new TreeSet<>(BASE_COLLECTION))}); + instances.add(new Object[] {Collections.synchronizedSortedSet(new TreeSet<>(BASE_COLLECTION)).headSet("Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedSortedSet(new TreeSet<>(BASE_COLLECTION)).tailSet("Bravo")}); + instances.add(new Object[] {Collections.synchronizedSortedSet(new TreeSet<>(BASE_COLLECTION)).subSet("Bravo", "Foxtrot")}); + instances.addAll(makeNavigableSets()); + + return instances; + } + + public static Collection makeNavigableSets() { + Collection instances = new ArrayList<>(); + + instances.add(new 
Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION))}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).descendingSet().descendingSet()}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).headSet("Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).headSet("Foxtrot", true)}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).tailSet("Bravo")}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).tailSet("Bravo", true)}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).subSet("Bravo", "Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedNavigableSet(new TreeSet<>(BASE_COLLECTION)).subSet("Bravo", true, "Foxtrot", true)}); + + return instances; + } + + public static Collection makeMaps() { + Collection instances = new ArrayList<>(); + + instances.add(new Object[] {Collections.synchronizedMap(new HashMap<>(BASE_MAP))}); + instances.addAll(makeSortedMaps()); + + return instances; + } + + public static Collection makeSortedMaps() { + Collection instances = new ArrayList<>(); + + instances.add(new Object[] {Collections.synchronizedSortedMap(new TreeMap<>(BASE_MAP))}); + instances.add(new Object[] {Collections.synchronizedSortedMap(new TreeMap<>(BASE_MAP)).headMap("Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedSortedMap(new TreeMap<>(BASE_MAP)).tailMap("Bravo")}); + instances.add(new Object[] {Collections.synchronizedSortedMap(new TreeMap<>(BASE_MAP)).subMap("Bravo", "Foxtrot")}); + instances.addAll(makeNavigableMaps()); + + return instances; + } + + public static Collection makeNavigableMaps() { + Collection instances = new ArrayList<>(); + + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP))}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP).descendingMap().descendingMap())}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).headMap("Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).headMap("Foxtrot", true)}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).tailMap("Bravo")}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).tailMap("Bravo", true)}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).subMap("Bravo", "Foxtrot")}); + instances.add(new Object[] {Collections.synchronizedNavigableMap(new TreeMap<>(BASE_MAP)).subMap("Bravo", true, "Foxtrot", true)}); + + return instances; + } + + private static Object getSyncCollectionMutex(Collection from) { + try { + Class synchronizedCollectionClazz = Class.forName("java.util.Collections$SynchronizedCollection"); + Field f = synchronizedCollectionClazz.getDeclaredField("mutex"); + f.setAccessible(true); + return f.get(from); + } catch ( ClassNotFoundException | NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Unable to get mutex field.", e); + } + } + + private static Object getSyncMapMutex(Map from) { + try { + Class synchronizedMapClazz = Class.forName("java.util.Collections$SynchronizedMap"); + Field f = 
synchronizedMapClazz.getDeclaredField("mutex"); + f.setAccessible(true); + return f.get(from); + } catch ( ClassNotFoundException | NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Unable to get mutex field.", e); + } + } + +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/util/IdentityHashMap/Capacity.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Random; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.testng.Assert.*; + +/* + * @test + * @bug 6904367 + * @summary IdentityHashMap reallocates storage when inserting expected + * number of elements + * @run testng Capacity + */ + +@Test +public class Capacity { + static final Field tableField; + static final Random random = new Random(); + static final Object[][] sizesData; + + @DataProvider(name="sizes", parallel = true) + public Object[][] sizesToTest() { return sizesData; } + + static { + try { + tableField = IdentityHashMap.class.getDeclaredField("table"); + tableField.setAccessible(true); + } catch (NoSuchFieldException e) { + throw new LinkageError("table", e); + } + + ArrayList sizes = new ArrayList<>(); + for (int size = 0; size < 200; size++) + sizes.add(new Object[] { size }); + + // some numbers known to demonstrate bug 6904367 + for (int size : new int[] {682, 683, 1365, 2730, 2731, 5461}) + sizes.add(new Object[] { size }); + + // a few more random sizes to try + for (int i = 0; i != 128; i++) + sizes.add(new Object[] { random.nextInt(5000) }); + + sizesData = sizes.toArray(new Object[0][]); + } + + static int capacity(IdentityHashMap map) { + try { + return ((Object[]) tableField.get(map)).length / 2; + } catch (Throwable t) { + throw new LinkageError("table", t); + } + } + + static void assertCapacity(IdentityHashMap map, + int expectedCapacity) { + assertEquals(capacity(map), expectedCapacity); + } + + static void growUsingPut(IdentityHashMap map, + int elementsToAdd) { + for (int i = 0; i < elementsToAdd; i++) + map.put(new Object(), new Object()); + } + + static void growUsingPutAll(IdentityHashMap map, + int elementsToAdd) { + IdentityHashMap other = new IdentityHashMap<>(); + growUsingPut(other, elementsToAdd); + map.putAll(other); + } + + static void growUsingRepeatedPutAll(IdentityHashMap 
map, + int elementsToAdd) { + for (int i = 0; i < elementsToAdd; i++) + map.putAll(Collections.singletonMap(new Object(), + new Object())); + } + + /** + * Checks that expected number of items can be inserted into + * the map without resizing of the internal storage + */ + @Test(dataProvider = "sizes") + public void canInsertExpectedItemsWithoutResizing(int size) + throws Throwable { + // First try growing using put() + IdentityHashMap m = new IdentityHashMap<>(size); + int initialCapacity = capacity(m); + growUsingPut(m, size); + assertCapacity(m, initialCapacity); + + // Doubling from the expected size will cause exactly one + // resize, except near minimum capacity. + if (size > 1) { + growUsingPut(m, size); + assertCapacity(m, 2 * initialCapacity); + } + + // Try again, growing with putAll() + m = new IdentityHashMap<>(size); + initialCapacity = capacity(m); + growUsingPutAll(m, size); + assertCapacity(m, initialCapacity); + + // Doubling from the expected size will cause exactly one + // resize, except near minimum capacity. + if (size > 1) { + growUsingPutAll(m, size); + assertCapacity(m, 2 * initialCapacity); + } + } + + /** + * Given the expected size, computes such a number N of items that + * inserting (N+1) items will trigger resizing of the internal storage + */ + static int threshold(int size) throws Throwable { + IdentityHashMap m = new IdentityHashMap<>(size); + int initialCapacity = capacity(m); + while (capacity(m) == initialCapacity) + growUsingPut(m, 1); + return m.size() - 1; + } + + /** + * Checks that inserting (threshold+1) item causes resizing + * of the internal storage + */ + @Test(dataProvider = "sizes") + public void passingThresholdCausesResize(int size) throws Throwable { + final int threshold = threshold(size); + IdentityHashMap m = new IdentityHashMap<>(threshold); + int initialCapacity = capacity(m); + + growUsingPut(m, threshold); + assertCapacity(m, initialCapacity); + + growUsingPut(m, 1); + assertCapacity(m, 2 * initialCapacity); + } + + /** + * Checks that 4 methods of requiring capacity lead to the same + * internal capacity, unless sized below default capacity. 
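+     * (The four growth patterns compared are: pre-sizing via the constructor,
+     * repeated put() calls, one bulk putAll(), and repeated single-entry
+     * putAll() calls, per the growUsing* helpers above.)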
+ */ + @Test(dataProvider = "sizes") + public void differentGrowthPatternsResultInSameCapacity(int size) + throws Throwable { + if (size < 21) // 21 is default maxExpectedSize + return; + + IdentityHashMap m; + m = new IdentityHashMap(size); + int capacity1 = capacity(m); + + m = new IdentityHashMap<>(); + growUsingPut(m, size); + int capacity2 = capacity(m); + + m = new IdentityHashMap<>(); + growUsingPutAll(m, size); + int capacity3 = capacity(m); + + m = new IdentityHashMap<>(); + growUsingRepeatedPutAll(m, size); + int capacity4 = capacity(m); + + if (capacity1 != capacity2 || + capacity2 != capacity3 || + capacity3 != capacity4) + throw new AssertionError("Capacities not equal: " + + capacity1 + " " + + capacity2 + " " + + capacity3 + " " + + capacity4); + } + + public void defaultExpectedMaxSizeIs21() { + assertCapacity(new IdentityHashMap(), 32); + assertCapacity(new IdentityHashMap(21), 32); + } + + public void minimumCapacityIs4() { + assertCapacity(new IdentityHashMap(0), 4); + assertCapacity(new IdentityHashMap(1), 4); + assertCapacity(new IdentityHashMap(2), 4); + assertCapacity(new IdentityHashMap(3), 8); + } + + @Test(enabled = false) + /** needs too much memory to run normally */ + public void maximumCapacityIs2ToThe29() { + assertCapacity(new IdentityHashMap(Integer.MAX_VALUE), + 1 << 29); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/util/logging/CheckZombieLockTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8048020 + * @author Daniel Fuchs + * @summary Regression on java.util.logging.FileHandler. + * The fix is to avoid filling up the file system with zombie lock files. 
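+ * A "zombie" lock here is a FileHandler ".lck" file left behind by a
+ * previous run (e.g. after a crash) that no live process still holds a
+ * file lock on; the CLOSE and REUSE scenarios below check that such files
+ * are removed or reused rather than left to accumulate.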
+ *
+ * @run main/othervm CheckZombieLockTest WRITABLE CLOSE CLEANUP
+ * @run main/othervm CheckZombieLockTest CLEANUP
+ * @run main/othervm CheckZombieLockTest WRITABLE
+ * @run main/othervm CheckZombieLockTest CREATE_FIRST
+ * @run main/othervm CheckZombieLockTest CREATE_NEXT
+ * @run main/othervm CheckZombieLockTest CREATE_NEXT
+ * @run main/othervm CheckZombieLockTest CLEANUP
+ * @run main/othervm CheckZombieLockTest REUSE
+ * @run main/othervm CheckZombieLockTest CLEANUP
+ */
+import java.io.File;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.logging.FileHandler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+public class CheckZombieLockTest {
+
+    private static final String WRITABLE_DIR = "writable-lockfile-dir";
+    private static volatile boolean supportsLocking = true;
+
+    static enum TestCase {
+        WRITABLE, // just verifies that we can create a file in our 'writable-lockfile-dir'
+        CLOSE, // checks that closing a FileHandler removes its lock file
+        CREATE_FIRST, // verifies that 'writable-lockfile-dir' contains no lock, then creates a first FileHandler.
+        CREATE_NEXT, // verifies that 'writable-lockfile-dir' contains a single lock, then creates the next FileHandler
+        REUSE, // verifies that zombie lock files can be reused
+        CLEANUP // removes "writable-lockfile-dir"
+    };
+
+    public static void main(String... args) throws IOException {
+        // we'll base all file creation attempts on the system temp directory,
+        // %t
+        File writableDir = setup();
+        System.out.println("Writable dir is: " + writableDir.getAbsolutePath());
+        // we now have one writable directory to work with:
+        //    writableDir
+        if (args == null || args.length == 0) {
+            args = new String[] { "WRITABLE", "CLOSE", "CLEANUP" };
+        }
+        try {
+            runTests(writableDir, args);
+        } catch (RuntimeException | IOException | Error x) {
+            // some error occurred: cleanup
+            delete(writableDir);
+            throw x;
+        }
+    }
+
+    /**
+     * @param writableDir in which log and lock file are created
+     * @throws SecurityException
+     * @throws RuntimeException
+     * @throws IOException
+     */
+    private static void runTests(File writableDir, String... args) throws SecurityException,
+            RuntimeException, IOException {
+        for (String arg : args) {
+            switch(TestCase.valueOf(arg)) {
+                // Test 1: makes sure we can create FileHandler in writable directory
+                case WRITABLE: checkWritable(writableDir); break;
+                // Test 2: verifies that FileHandler.close() cleans up its lock file
+                case CLOSE: testFileHandlerClose(writableDir); break;
+                // Test 3: creates the first file handler
+                case CREATE_FIRST: testFileHandlerCreate(writableDir, true); break;
+                // Test 4, 5, ... creates the next file handler
+                case CREATE_NEXT: testFileHandlerCreate(writableDir, false); break;
+                // Checks that zombie lock files are reused appropriately
+                case REUSE: testFileHandlerReuse(writableDir); break;
+                // Removes the writableDir
+                case CLEANUP: delete(writableDir); break;
+                default: throw new RuntimeException("No such test case: " + arg);
+            }
+        }
+    }
+
+    /**
+     * @param writableDir in which log and lock file are created
+     * @throws SecurityException
+     * @throws RuntimeException
+     * @throws IOException
+     */
+    private static void checkWritable(File writableDir) throws SecurityException,
+            RuntimeException, IOException {
+        // Test 1: make sure we can create/delete files in the writable dir.
+ final File file = new File(writableDir, "test.txt"); + if (!createFile(file, false)) { + throw new IOException("Can't create " + file + "\n\tUnable to run test"); + } else { + delete(file); + } + } + + + private static FileHandler createFileHandler(File writableDir) throws SecurityException, + RuntimeException, IOException { + // Test 1: make sure we can create FileHandler in writable directory + try { + FileHandler handler = new FileHandler("%t/" + WRITABLE_DIR + "/log.log"); + handler.publish(new LogRecord(Level.INFO, handler.toString())); + handler.flush(); + return handler; + } catch (IOException ex) { + throw new RuntimeException("Test failed: should have been able" + + " to create FileHandler for " + "%t/" + WRITABLE_DIR + + "/log.log in writable directory.", ex); + } + } + + private static List listLocks(File writableDir, boolean print) + throws IOException { + List locks = new ArrayList<>(); + for (File f : writableDir.listFiles()) { + if (print) { + System.out.println("Found file: " + f.getName()); + } + if (f.getName().endsWith(".lck")) { + locks.add(f); + } + } + return locks; + } + + private static void testFileHandlerClose(File writableDir) throws IOException { + File fakeLock = new File(writableDir, "log.log.lck"); + if (!createFile(fakeLock, false)) { + throw new IOException("Can't create fake lock file: " + fakeLock); + } + try { + List before = listLocks(writableDir, true); + System.out.println("before: " + before.size() + " locks found"); + FileHandler handler = createFileHandler(writableDir); + System.out.println("handler created: " + handler); + List after = listLocks(writableDir, true); + System.out.println("after creating handler: " + after.size() + " locks found"); + handler.close(); + System.out.println("handler closed: " + handler); + List afterClose = listLocks(writableDir, true); + System.out.println("after closing handler: " + afterClose.size() + " locks found"); + afterClose.removeAll(before); + if (!afterClose.isEmpty()) { + throw new RuntimeException("Zombie lock file detected: " + afterClose); + } + } finally { + if (fakeLock.canRead()) delete(fakeLock); + } + List finalLocks = listLocks(writableDir, false); + System.out.println("After cleanup: " + finalLocks.size() + " locks found"); + } + + + private static void testFileHandlerReuse(File writableDir) throws IOException { + List before = listLocks(writableDir, true); + System.out.println("before: " + before.size() + " locks found"); + try { + if (!before.isEmpty()) { + throw new RuntimeException("Expected no lock file! 
Found: " + before); + } + } finally { + before.stream().forEach(CheckZombieLockTest::delete); + } + + FileHandler handler1 = createFileHandler(writableDir); + System.out.println("handler created: " + handler1); + List after = listLocks(writableDir, true); + System.out.println("after creating handler: " + after.size() + " locks found"); + if (after.size() != 1) { + throw new RuntimeException("Unexpected number of lock files found for " + + handler1 + ": " + after); + } + final File lock = after.get(0); + after.clear(); + handler1.close(); + after = listLocks(writableDir, true); + System.out.println("after closing handler: " + after.size() + " locks found"); + if (!after.isEmpty()) { + throw new RuntimeException("Unexpected number of lock files found for " + + handler1 + ": " + after); + } + if (!createFile(lock, false)) { + throw new IOException("Can't create fake lock file: " + lock); + } + try { + before = listLocks(writableDir, true); + System.out.println("before: " + before.size() + " locks found"); + if (before.size() != 1) { + throw new RuntimeException("Unexpected number of lock files found: " + + before + " expected [" + lock + "]."); + } + FileHandler handler2 = createFileHandler(writableDir); + System.out.println("handler created: " + handler2); + after = listLocks(writableDir, true); + System.out.println("after creating handler: " + after.size() + " locks found"); + after.removeAll(before); + if (!after.isEmpty()) { + throw new RuntimeException("Unexpected lock file found: " + after + + "\n\t" + lock + " should have been reused"); + } + handler2.close(); + System.out.println("handler closed: " + handler2); + List afterClose = listLocks(writableDir, true); + System.out.println("after closing handler: " + afterClose.size() + " locks found"); + if (!afterClose.isEmpty()) { + throw new RuntimeException("Zombie lock file detected: " + afterClose); + } + + if (supportsLocking) { + FileChannel fc = FileChannel.open(Paths.get(lock.getAbsolutePath()), + StandardOpenOption.CREATE_NEW, StandardOpenOption.APPEND, + StandardOpenOption.WRITE); + try { + if (fc.tryLock() != null) { + System.out.println("locked: " + lock); + handler2 = createFileHandler(writableDir); + System.out.println("handler created: " + handler2); + after = listLocks(writableDir, true); + System.out.println("after creating handler: " + after.size() + + " locks found"); + after.removeAll(before); + if (after.size() != 1) { + throw new RuntimeException("Unexpected lock files found: " + after + + "\n\t" + lock + " should not have been reused"); + } + } else { + throw new RuntimeException("Failed to lock: " + lock); + } + } finally { + delete(lock); + } + } + } finally { + List finalLocks = listLocks(writableDir, false); + System.out.println("end: " + finalLocks.size() + " locks found"); + delete(writableDir); + } + } + + + private static void testFileHandlerCreate(File writableDir, boolean first) + throws IOException { + List before = listLocks(writableDir, true); + System.out.println("before: " + before.size() + " locks found"); + try { + if (first && !before.isEmpty()) { + throw new RuntimeException("Expected no lock file! Found: " + before); + } else if (!first && before.size() != 1) { + throw new RuntimeException("Expected a single lock file! 
Found: " + before); + } + } finally { + before.stream().forEach(CheckZombieLockTest::delete); + } + FileHandler handler = createFileHandler(writableDir); + System.out.println("handler created: " + handler); + List after = listLocks(writableDir, true); + System.out.println("after creating handler: " + after.size() + " locks found"); + if (after.size() != 1) { + throw new RuntimeException("Unexpected number of lock files found for " + + handler + ": " + after); + } + } + + + /** + * Setup all the files and directories needed for the tests + * + * @return writable directory created that needs to be deleted when done + * @throws RuntimeException + */ + private static File setup() throws RuntimeException { + // First do some setup in the temporary directory (using same logic as + // FileHandler for %t pattern) + String tmpDir = System.getProperty("java.io.tmpdir"); // i.e. %t + if (tmpDir == null) { + tmpDir = System.getProperty("user.home"); + } + File tmpOrHomeDir = new File(tmpDir); + // Create a writable directory here (%t/writable-lockfile-dir) + File writableDir = new File(tmpOrHomeDir, WRITABLE_DIR); + if (!createFile(writableDir, true)) { + throw new RuntimeException("Test setup failed: unable to create" + + " writable working directory " + + writableDir.getAbsolutePath() ); + } + + // try to determine whether file locking is supported + final String uniqueFileName = UUID.randomUUID().toString()+".lck"; + try { + FileChannel fc = FileChannel.open(Paths.get(writableDir.getAbsolutePath(), + uniqueFileName), + StandardOpenOption.CREATE_NEW, StandardOpenOption.APPEND, + StandardOpenOption.DELETE_ON_CLOSE); + try { + fc.tryLock(); + } catch(IOException x) { + supportsLocking = false; + } finally { + fc.close(); + } + } catch (IOException t) { + // should not happen + System.err.println("Failed to create new file " + uniqueFileName + + " in " + writableDir.getAbsolutePath()); + throw new RuntimeException("Test setup failed: unable to run test", t); + } + return writableDir; + } + + /** + * @param newFile + * @return true if file already exists or creation succeeded + */ + private static boolean createFile(File newFile, boolean makeDirectory) { + if (newFile.exists()) { + return true; + } + if (makeDirectory) { + return newFile.mkdir(); + } else { + try { + return newFile.createNewFile(); + } catch (IOException ioex) { + ioex.printStackTrace(); + return false; + } + } + } + + /* + * Recursively delete all files starting at specified file + */ + private static void delete(File f) { + if (f != null && f.isDirectory()) { + for (File c : f.listFiles()) + delete(c); + } + if (!f.delete()) + System.err.println( + "WARNING: unable to delete/cleanup writable test directory: " + + f ); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/java/util/logging/FileHandlerPath.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FilePermission;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import static java.nio.file.StandardOpenOption.CREATE_NEW;
+import static java.nio.file.StandardOpenOption.WRITE;
+import java.security.CodeSource;
+import java.security.Permission;
+import java.security.PermissionCollection;
+import java.security.Permissions;
+import java.security.Policy;
+import java.security.ProtectionDomain;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
+import java.util.PropertyPermission;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.FileHandler;
+import java.util.logging.LogManager;
+import java.util.logging.LoggingPermission;
+
+/**
+ * @test
+ * @bug 8059269
+ * @summary tests that using a simple (non-composite) pattern does not lead
+ *        to NPE when the lock file already exists.
+ * @run main/othervm FileHandlerPath UNSECURE
+ * @run main/othervm FileHandlerPath SECURE
+ * @author danielfuchs
+ */
+public class FileHandlerPath {
+
+    /**
+     * We will test the simple pattern in two configurations.
+     * UNSECURE: No security manager.
+     * SECURE: With the security manager present - and the required
+     *         permissions granted.
+     */
+    public static enum TestCase {
+        UNSECURE, SECURE;
+        public void run(Properties propertyFile) throws Exception {
+            System.out.println("Running test case: " + name());
+            Configure.setUp(this, propertyFile);
+            test(this.name() + " " + propertyFile.getProperty("test.name"), propertyFile);
+        }
+    }
+
+    // Use a random name provided by UUID to avoid collision with other tests
+    final static String logFile = FileHandlerPath.class.getSimpleName() + "_"
+            + UUID.randomUUID().toString() + ".log";
+    final static String tmpLogFile;
+    final static String userDir = System.getProperty("user.dir");
+    final static String tmpDir = System.getProperty("java.io.tmpdir");
+    private static final List<Properties> properties;
+    static {
+        tmpLogFile = new File(tmpDir, logFile).toString();
+        Properties props1 = new Properties();
+        Properties props2 = new Properties();
+        props1.setProperty("test.name", "relative file");
+        props1.setProperty("test.file.name", logFile);
+        props1.setProperty(FileHandler.class.getName() + ".pattern", logFile);
+        props1.setProperty(FileHandler.class.getName() + ".count", "1");
+        props2.setProperty("test.name", "absolute file");
+        props2.setProperty("test.file.name", tmpLogFile);
+        props2.setProperty(FileHandler.class.getName() + ".pattern", "%t/" + logFile);
+        props2.setProperty(FileHandler.class.getName() + ".count", "1");
+        properties = Collections.unmodifiableList(Arrays.asList(
+                props1,
+                props2));
+    }
+
+    public static void main(String...
args) throws Exception { + + if (args == null || args.length == 0) { + args = new String[] { + TestCase.UNSECURE.name(), + TestCase.SECURE.name(), + }; + } + + // Sanity checks + + if (!Files.isWritable(Paths.get(userDir))) { + throw new RuntimeException(userDir + + ": user.dir is not writable - can't run test."); + } + if (!Files.isWritable(Paths.get(tmpDir))) { + throw new RuntimeException(tmpDir + + ": java.io.tmpdir is not writable - can't run test."); + } + + File[] files = { + new File(logFile), + new File(tmpLogFile), + new File(logFile+".1"), + new File(tmpLogFile+".1"), + new File(logFile+".lck"), + new File(tmpLogFile+".lck"), + new File(logFile+".1.lck"), + new File(tmpLogFile+".1.lck") + }; + + for (File log : files) { + if (log.exists()) { + throw new Exception(log +": file already exists - can't run test."); + } + } + + // Now start the real test + + try { + for (String testName : args) { + for (Properties propertyFile : properties) { + TestCase test = TestCase.valueOf(testName); + test.run(propertyFile); + } + } + } finally { + // Cleanup... + Configure.doPrivileged(() -> { + for(File log : files) { + try { + final boolean isLockFile = log.getName().endsWith(".lck"); + // lock file should already be deleted, except if the + // test failed in exception. + // log file should all be present, except if the test + // failed in exception. + if (log.exists()) { + if (!isLockFile) { + System.out.println("deleting "+log.toString()); + } else { + System.err.println("deleting lock file "+log.toString()); + } + log.delete(); + } else { + if (!isLockFile) { + System.err.println(log.toString() + ": not found."); + } + } + } catch (Throwable t) { + // should not happen + t.printStackTrace(); + } + } + }); + } + } + + static class Configure { + static Policy policy = null; + static final AtomicBoolean allowAll = new AtomicBoolean(false); + static void setUp(TestCase test, Properties propertyFile) { + switch (test) { + case SECURE: + if (policy == null && System.getSecurityManager() != null) { + throw new IllegalStateException("SecurityManager already set"); + } else if (policy == null) { + policy = new SimplePolicy(TestCase.SECURE, allowAll); + Policy.setPolicy(policy); + System.setSecurityManager(new SecurityManager()); + } + if (System.getSecurityManager() == null) { + throw new IllegalStateException("No SecurityManager."); + } + if (policy == null) { + throw new IllegalStateException("policy not configured"); + } + break; + case UNSECURE: + if (System.getSecurityManager() != null) { + throw new IllegalStateException("SecurityManager already set"); + } + break; + default: + new InternalError("No such testcase: " + test); + } + doPrivileged(() -> { + try { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + propertyFile.store(bytes, propertyFile.getProperty("test.name")); + ByteArrayInputStream bais = new ByteArrayInputStream(bytes.toByteArray()); + LogManager.getLogManager().readConfiguration(bais); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }); + } + static void doPrivileged(Runnable run) { + allowAll.set(true); + try { + run.run(); + } finally { + allowAll.set(false); + } + } + } + + public static void test(String name, Properties props) throws Exception { + System.out.println("Testing: " + name); + String file = props.getProperty("test.file.name"); + // create the lock files first - in order to take the path that + // used to trigger the NPE + Files.createFile(Paths.get(file + ".lck")); + Files.createFile(Paths.get(file + ".1.lck")); + final 
FileHandler f1 = new FileHandler();
+        final FileHandler f2 = new FileHandler();
+        f1.close();
+        f2.close();
+        System.out.println("Success for " + name);
+    }
+
+
+    final static class PermissionsBuilder {
+        final Permissions perms;
+        public PermissionsBuilder() {
+            this(new Permissions());
+        }
+        public PermissionsBuilder(Permissions perms) {
+            this.perms = perms;
+        }
+        public PermissionsBuilder add(Permission p) {
+            perms.add(p);
+            return this;
+        }
+        public PermissionsBuilder addAll(PermissionCollection col) {
+            if (col != null) {
+                for (Enumeration<Permission> e = col.elements(); e.hasMoreElements(); ) {
+                    perms.add(e.nextElement());
+                }
+            }
+            return this;
+        }
+        public Permissions toPermissions() {
+            final PermissionsBuilder builder = new PermissionsBuilder();
+            builder.addAll(perms);
+            return builder.perms;
+        }
+    }
+
+    public static class SimplePolicy extends Policy {
+
+        final Permissions permissions;
+        final Permissions allPermissions;
+        final AtomicBoolean allowAll;
+        public SimplePolicy(TestCase test, AtomicBoolean allowAll) {
+            this.allowAll = allowAll;
+            permissions = new Permissions();
+            permissions.add(new LoggingPermission("control", null)); // needed by new FileHandler()
+            permissions.add(new FilePermission("<<ALL FILES>>", "read")); // needed by new FileHandler()
+            permissions.add(new FilePermission(logFile, "write,delete")); // needed by new FileHandler()
+            permissions.add(new FilePermission(logFile+".lck", "write,delete")); // needed by FileHandler.close()
+            permissions.add(new FilePermission(logFile+".1", "write,delete")); // needed by new FileHandler()
+            permissions.add(new FilePermission(logFile+".1.lck", "write,delete")); // needed by FileHandler.close()
+            permissions.add(new FilePermission(tmpLogFile, "write,delete")); // needed by new FileHandler()
+            permissions.add(new FilePermission(tmpLogFile+".lck", "write,delete")); // needed by FileHandler.close()
+            permissions.add(new FilePermission(tmpLogFile+".1", "write,delete")); // needed by new FileHandler()
+            permissions.add(new FilePermission(tmpLogFile+".1.lck", "write,delete")); // needed by FileHandler.close()
+            permissions.add(new FilePermission(userDir, "write")); // needed by new FileHandler()
+            permissions.add(new FilePermission(tmpDir, "write")); // needed by new FileHandler()
+            permissions.add(new PropertyPermission("user.dir", "read"));
+            permissions.add(new PropertyPermission("java.io.tmpdir", "read"));
+            allPermissions = new Permissions();
+            allPermissions.add(new java.security.AllPermission());
+        }
+
+        @Override
+        public boolean implies(ProtectionDomain domain, Permission permission) {
+            if (allowAll.get()) return allPermissions.implies(permission);
+            return permissions.implies(permission);
+        }
+
+        @Override
+        public PermissionCollection getPermissions(CodeSource codesource) {
+            return new PermissionsBuilder().addAll(allowAll.get()
+                    ? allPermissions : permissions).toPermissions();
+        }
+
+        @Override
+        public PermissionCollection getPermissions(ProtectionDomain domain) {
+            return new PermissionsBuilder().addAll(allowAll.get()
+                    ? allPermissions : permissions).toPermissions();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/java/util/logging/LogManagerAppContextDeadlock.java	Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.security.CodeSource; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.util.Enumeration; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.LogManager; +import java.util.logging.Logger; + +/** + * @test + * @bug 8065991 + * @summary check that when LogManager is initialized, a deadlock similar + * to that described in 8065709 will not occur. + * @run main/othervm LogManagerAppContextDeadlock UNSECURE + * @run main/othervm LogManagerAppContextDeadlock SECURE + * + * @author danielfuchs + */ +public class LogManagerAppContextDeadlock { + + public static final Semaphore sem = new Semaphore(0); + public static final Semaphore sem2 = new Semaphore(0); + public static final Semaphore sem3 = new Semaphore(-2); + public static volatile boolean goOn = true; + public static volatile Exception thrown; + + // Emulate EventQueue + static class FakeEventQueue { + static final Logger logger = Logger.getLogger("foo"); + } + + // Emulate AppContext + static class FakeAppContext { + + final static AtomicInteger numAppContexts = new AtomicInteger(0); + static final class FakeAppContextLock {} + static final FakeAppContextLock lock = new FakeAppContextLock(); + static volatile FakeAppContext appContext; + + final FakeEventQueue queue; + FakeAppContext() { + appContext = this; + numAppContexts.incrementAndGet(); + // release sem2 to let Thread t2 call Logger.getLogger(). + sem2.release(); + try { + // Wait until we JavaAWTAccess is called by LogManager. + // Thread 2 will call Logger.getLogger() which will + // trigger a call to JavaAWTAccess - which will release + // sem, thus ensuring that Thread #2 is where we want it. 
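Stripped of the logging specifics, the construction above is a plain two-semaphore rendezvous: one permit says "I have reached the interesting point", the other says "you may continue". A minimal sketch of the idiom (thread roles and names are illustrative, not taken from the test):

    import java.util.concurrent.Semaphore;

    public class RendezvousSketch {
        static final Semaphore ready = new Semaphore(0);   // B signals: "at the checkpoint"
        static final Semaphore proceed = new Semaphore(0); // A signals: "go on"

        public static void main(String[] args) throws InterruptedException {
            Thread b = new Thread(() -> {
                ready.release();               // tell A we reached the checkpoint
                try {
                    proceed.acquire();         // park until A has seen that state
                } catch (InterruptedException x) {
                    Thread.currentThread().interrupt();
                }
                System.out.println("B resumes only after A released it");
            });
            b.start();
            ready.acquire();                   // A waits for B's checkpoint
            System.out.println("A observed B at the checkpoint");
            proceed.release();                 // release B
            b.join();
        }
    }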
+ sem.acquire(); + System.out.println("Sem acquired: Thread #2 has called JavaAWTAccess"); + } catch(InterruptedException x) { + Thread.interrupted(); + } + queue = new FakeEventQueue(); + } + + static FakeAppContext getAppContext() { + synchronized (lock) { + if (numAppContexts.get() == 0) { + return new FakeAppContext(); + } + return appContext; + } + } + + static { + sun.misc.SharedSecrets.setJavaAWTAccess(new sun.misc.JavaAWTAccess() { + @Override + public Object getAppletContext() { + if (numAppContexts.get() == 0) return null; + // We are in JavaAWTAccess, we can release sem and let + // FakeAppContext constructor proceeed. + System.out.println("Releasing Sem"); + sem.release(); + return getAppContext(); + } + + }); + } + + } + + + // Test with or without a security manager + public static enum TestCase { + UNSECURE, SECURE; + public void run() throws Exception { + System.out.println("Running test case: " + name()); + Configure.setUp(this); + test(this); + } + } + + public static void test(TestCase test) throws Exception { + Thread t1 = new Thread() { + @Override + public void run() { + sem3.release(); + System.out.println("FakeAppContext.getAppContext()"); + FakeAppContext.getAppContext(); + System.out.println("Done: FakeAppContext.getAppContext()"); + } + }; + t1.setDaemon(true); + t1.start(); + Thread t2 = new Thread() { + public void run() { + sem3.release(); + try { + // Wait until Thread1 is in FakeAppContext constructor + sem2.acquire(); + System.out.println("Sem2 acquired: Thread #1 will be waiting to acquire Sem"); + } catch (InterruptedException ie) { + Thread.interrupted(); + } + System.out.println("Logger.getLogger(name).info(name)"); + Logger.getLogger(test.name());//.info(name); + System.out.println("Done: Logger.getLogger(name).info(name)"); + } + }; + t2.setDaemon(true); + t2.start(); + System.out.println("Should exit now..."); + Thread detector = new DeadlockDetector(); + detector.start(); + + // Wait for the 3 threads to start + sem3.acquire(); + + // Now wait for t1 & t2 to finish, or for a deadlock to be detected. + while (goOn && (t1.isAlive() || t2.isAlive())) { + if (t2.isAlive()) t2.join(1000); + if (test == TestCase.UNSECURE && System.getSecurityManager() == null) { + // if there's no security manager, AppContext.getAppContext() is + // not called - so Thread t2 will not end up calling + // sem.release(). In that case we must release the semaphore here + // so that t1 can proceed. + if (LogManager.getLogManager().getLogger(TestCase.UNSECURE.name()) != null) { + // means Thread t2 has created the logger + sem.release(); + } + } + if (t1.isAlive()) t1.join(1000); + } + if (thrown != null) { + throw thrown; + } + } + + // Thrown by the deadlock detector + static final class DeadlockException extends RuntimeException { + public DeadlockException(String message) { + super(message); + } + @Override + public void printStackTrace() { + } + } + + public static void main(String[] args) throws Exception { + + if (args.length == 0) { + args = new String[] { "SECURE" }; + } + + // If we don't initialize LogManager here, there will be + // a deadlock. + // See + // for more details. + Logger.getLogger("main").info("starting..."); + try { + TestCase.valueOf(args[0]).run(); + System.out.println("Test "+args[0]+" Passed"); + } catch(Throwable t) { + System.err.println("Test " + args[0] +" failed: " + t); + t.printStackTrace(); + } + } + + // Called by the deadlock detector when a deadlock is found. 
+    static void fail(Exception x) {
+        x.printStackTrace();
+        if (thrown == null) {
+            thrown = x;
+        }
+        goOn = false;
+    }
+
+    // A thread that detects deadlocks.
+    final static class DeadlockDetector extends Thread {
+
+        public DeadlockDetector() {
+            this.setDaemon(true);
+        }
+
+        @Override
+        public void run() {
+            sem3.release();
+            Configure.doPrivileged(this::loop);
+        }
+        public void loop() {
+            while(goOn) {
+                try {
+                    long[] ids = ManagementFactory.getThreadMXBean().findDeadlockedThreads();
+                    ids = ids == null ? new long[0] : ids;
+                    if (ids.length == 1) {
+                        throw new RuntimeException("Found 1 deadlocked thread: "+ids[0]);
+                    } else if (ids.length > 0) {
+                        ThreadInfo[] infos = ManagementFactory.getThreadMXBean().getThreadInfo(ids, Integer.MAX_VALUE);
+                        System.err.println("Found "+ids.length+" deadlocked threads: ");
+                        for (ThreadInfo inf : infos) {
+                            System.err.println(inf);
+                        }
+                        throw new DeadlockException("Found "+ids.length+" deadlocked threads");
+                    }
+                    Thread.sleep(100);
+                } catch(InterruptedException | RuntimeException x) {
+                    fail(x);
+                }
+            }
+        }
+
+    }
+
+    // A helper class to configure the security manager for the test,
+    // and bypass it when needed.
+    static class Configure {
+        static Policy policy = null;
+        static final ThreadLocal<AtomicBoolean> allowAll = new ThreadLocal<AtomicBoolean>() {
+            @Override
+            protected AtomicBoolean initialValue() {
+                return new AtomicBoolean(false);
+            }
+        };
+        static void setUp(TestCase test) {
+            switch (test) {
+                case SECURE:
+                    if (policy == null && System.getSecurityManager() != null) {
+                        throw new IllegalStateException("SecurityManager already set");
+                    } else if (policy == null) {
+                        policy = new SimplePolicy(TestCase.SECURE, allowAll);
+                        Policy.setPolicy(policy);
+                        System.setSecurityManager(new SecurityManager());
+                    }
+                    if (System.getSecurityManager() == null) {
+                        throw new IllegalStateException("No SecurityManager.");
+                    }
+                    if (policy == null) {
+                        throw new IllegalStateException("policy not configured");
+                    }
+                    break;
+                case UNSECURE:
+                    if (System.getSecurityManager() != null) {
+                        throw new IllegalStateException("SecurityManager already set");
+                    }
+                    break;
+                default:
+                    throw new InternalError("No such testcase: " + test);
+            }
+        }
+        static void doPrivileged(Runnable run) {
+            allowAll.get().set(true);
+            try {
+                run.run();
+            } finally {
+                allowAll.get().set(false);
+            }
+        }
+    }
+
+    // A Helper class to build a set of permissions.
+    final static class PermissionsBuilder {
+        final Permissions perms;
+        public PermissionsBuilder() {
+            this(new Permissions());
+        }
+        public PermissionsBuilder(Permissions perms) {
+            this.perms = perms;
+        }
+        public PermissionsBuilder add(Permission p) {
+            perms.add(p);
+            return this;
+        }
+        public PermissionsBuilder addAll(PermissionCollection col) {
+            if (col != null) {
+                for (Enumeration<Permission> e = col.elements(); e.hasMoreElements(); ) {
+                    perms.add(e.nextElement());
+                }
+            }
+            return this;
+        }
+        public Permissions toPermissions() {
+            final PermissionsBuilder builder = new PermissionsBuilder();
+            builder.addAll(perms);
+            return builder.perms;
+        }
+    }
+
+    // Policy for the test...
+    public static class SimplePolicy extends Policy {
+
+        final Permissions permissions;
+        final Permissions allPermissions;
+        final ThreadLocal<AtomicBoolean> allowAll; // actually: this should be in a thread local
+        public SimplePolicy(TestCase test, ThreadLocal<AtomicBoolean> allowAll) {
+            this.allowAll = allowAll;
+            // we don't actually need any permission to create our
+            // FileHandlers because we're passing invalid parameters
+            // which will make the creation fail...
+ permissions = new Permissions(); + permissions.add(new RuntimePermission("accessClassInPackage.sun.misc")); + + // these are used for configuring the test itself... + allPermissions = new Permissions(); + allPermissions.add(new java.security.AllPermission()); + + } + + @Override + public boolean implies(ProtectionDomain domain, Permission permission) { + if (allowAll.get().get()) return allPermissions.implies(permission); + return permissions.implies(permission); + } + + @Override + public PermissionCollection getPermissions(CodeSource codesource) { + return new PermissionsBuilder().addAll(allowAll.get().get() + ? allPermissions : permissions).toPermissions(); + } + + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) { + return new PermissionsBuilder().addAll(allowAll.get().get() + ? allPermissions : permissions).toPermissions(); + } + } + +} --- ./jdk/test/java/util/logging/LoggingDeadlock2.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/util/logging/LoggingDeadlock2.java Wed Feb 04 12:14:43 2015 -0800 @@ -28,7 +28,7 @@ * @author Serguei Spitsyn / Hitachi / Martin Buchholz * * @build LoggingDeadlock2 - * @run main/timeout=15 LoggingDeadlock2 + * @run main LoggingDeadlock2 * * There is a clear deadlock between LogManager. and * Cleaner.run() methods. --- ./jdk/test/java/util/logging/TestLoggerBundleSync.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/java/util/logging/TestLoggerBundleSync.java Wed Feb 04 12:14:43 2015 -0800 @@ -58,6 +58,7 @@ */ public class TestLoggerBundleSync { + static final boolean VERBOSE = false; static volatile Exception thrown = null; static volatile boolean goOn = true; @@ -65,6 +66,7 @@ static final long TIME = 4 * 1000; // 4 sec. static final long STEP = 1 * 1000; // message every 1 sec. static final int LCOUNT = 50; // change bundle 50 times... + static final AtomicLong ignoreLogCount = new AtomicLong(0); static final AtomicLong setRBcount = new AtomicLong(0); static final AtomicLong setRBNameCount = new AtomicLong(0); static final AtomicLong getRBcount = new AtomicLong(0); @@ -150,6 +152,7 @@ long sSetRBNameCount = setRBNameCount.get(); long sCheckCount = checkCount.get(); long sNextLong = nextLong.get(); + long sIgnoreLogCount = ignoreLogCount.get(); List threads = new ArrayList<>(); for (Class type : classes) { threads.add(new SetRB(type)); @@ -181,21 +184,58 @@ + " resource bundles set by " + classes.size() + " Thread(s),"); System.out.println("\t " + (setRBNameCount.get() - sSetRBNameCount) + " resource bundle names set by " + classes.size() + " Thread(s),"); + System.out.println("\t " + (ignoreLogCount.get() - sIgnoreLogCount) + + " log messages emitted by other GetRB threads were ignored" + + " to ensure MT test consistency,"); System.out.println("\t ThreadMXBean.findDeadlockedThreads called " + (checkCount.get() -sCheckCount) + " times by 1 Thread."); } final static class GetRB extends Thread { - final static class MyHandler extends Handler { + final class MyHandler extends Handler { volatile ResourceBundle rb; volatile String rbName; volatile int count = 0; @Override public synchronized void publish(LogRecord record) { - count++; - rb = record.getResourceBundle(); - rbName = record.getResourceBundleName(); + Object[] params = record.getParameters(); + // Each GetRB thread has its own handler, but since they + // log into the same logger, each handler may receive + // messages emitted by other threads. 
+                // This means that GetRB#2.handler may receive a message
+                // emitted by GetRB#1 at a time when the resource bundle
+                // was still null.
+                // To avoid falling into this trap, the GetRB thread passes
+                // 'this' as argument to the messages it logs - which does
+                // allow us here to ignore messages that were not emitted
+                // by our own GetRB.this thread...
+                if (params.length == 1) {
+                    if (params[0] == GetRB.this) {
+                        // The message was emitted by our thread.
+                        count++;
+                        rb = record.getResourceBundle();
+                        rbName = record.getResourceBundleName();
+                    } else {
+                        // The message was emitted by another thread: just
+                        // ignore it, as it may have been emitted at a time
+                        // when the resource bundle was still null, and
+                        // processing it may overwrite the 'rb' and 'rbName'
+                        // recorded from the message emitted by our own thread.
+                        if (VERBOSE) {
+                            System.out.println("Ignoring message logged by " + params[0]);
+                        }
+                        ignoreLogCount.incrementAndGet();
+                    }
+                } else {
+                    ignoreLogCount.incrementAndGet();
+                    System.err.println("Unexpected message received");
+                }
+            }
+
+            void reset() {
+                rbName = null;
+                rb = null;
             }
 
             @Override
@@ -207,6 +247,7 @@
             }
         };
         final MyHandler handler = new MyHandler();
+
         @Override
         public void run() {
             try {
@@ -234,9 +275,10 @@
                                 + handler.getLevel());
                 }
                 final int countBefore = handler.count;
+                handler.reset();
                 ll.setLevel(Level.FINEST);
                 ll.addHandler(handler);
-                ll.fine("dummy");
+                ll.log(Level.FINE, "dummy {0}", this);
                 ll.removeHandler(handler);
                 final int countAfter = handler.count;
                 if (countBefore == countAfter) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ ./jdk/test/javax/imageio/plugins/png/PngDitDepthTest.java	Wed Feb 04 12:14:43 2015 -0800
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ */ + +/* +* @test +* @bug 4991647 +* @summary PNGMetadata.getAsTree() sets bitDepth to invalid value +* @run main PngDitDepthTest +*/ + +import org.w3c.dom.Node; + +import javax.imageio.ImageIO; +import javax.imageio.ImageTypeSpecifier; +import javax.imageio.ImageWriter; +import javax.imageio.metadata.IIOInvalidTreeException; +import javax.imageio.metadata.IIOMetadata; +import java.awt.image.ColorModel; +import java.awt.image.SampleModel; +import java.util.Iterator; + +public class PngDitDepthTest { + + public static void main(String[] args) throws IIOInvalidTreeException { + + // getting the writer for the png format + Iterator iter = ImageIO.getImageWritersByFormatName("png"); + ImageWriter writer = (ImageWriter) iter.next(); + + // creating a color model + ColorModel colorModel = ColorModel.getRGBdefault(); + + // creating a sample model + SampleModel sampleModel = colorModel.createCompatibleSampleModel(640, 480); + + // creating a default metadata object + IIOMetadata metaData = writer.getDefaultImageMetadata(new ImageTypeSpecifier(colorModel, sampleModel), null); + String formatName = metaData.getNativeMetadataFormatName(); + + // first call + Node metaDataNode = metaData.getAsTree(formatName); + try { + metaData.setFromTree(formatName, metaDataNode); + } catch (Exception ex) { + ex.printStackTrace(); + } + + // second call (bitdepht is already set to an invalid value) + metaDataNode = metaData.getAsTree(formatName); + + metaData.setFromTree(formatName, metaDataNode); + + } +} --- ./jdk/test/javax/management/MBeanServer/MBeanFallbackTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/MBeanServer/MBeanFallbackTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -35,7 +35,7 @@ * @author Jaroslav Bachorik * @run clean MBeanFallbackTest * @run build MBeanFallbackTest - * @run main MBeanFallbackTest + * @run main/othervm -Djdk.jmx.mbeans.allowNonPublic=true MBeanFallbackTest */ public class MBeanFallbackTest { private static interface PrivateMBean { @@ -51,7 +51,6 @@ private static int failures = 0; public static void main(String[] args) throws Exception { - System.setProperty("jdk.jmx.mbeans.allowNonPublic", "true"); testPrivate(PrivateMBean.class, new Private()); if (failures == 0) --- ./jdk/test/javax/management/ObjectName/SerialCompatTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/ObjectName/SerialCompatTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -28,7 +28,7 @@ * @author Eamonn McManus, Daniel Fuchs * @run clean SerialCompatTest * @run build SerialCompatTest - * @run main/othervm SerialCompatTest + * @run main/othervm -Djdk.jmx.mbeans.allowNonPublic=true -Djmx.serial.form=1.0 SerialCompatTest */ import java.io.*; @@ -223,8 +223,6 @@ } public static void main(String[] args) throws Exception { - System.setProperty("jmx.serial.form", "1.0"); - /* Check that we really are in jmx.serial.form=1.0 mode. 
The property is frozen the first time the ObjectName class is referenced so checking that it is set to the correct --- ./jdk/test/javax/management/monitor/AttributeArbitraryDataTypeTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/AttributeArbitraryDataTypeTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -58,9 +58,9 @@ public class AttributeArbitraryDataTypeTest implements NotificationListener { // Flag to notify that a message has been received - private boolean counterMessageReceived = false; - private boolean gaugeMessageReceived = false; - private boolean stringMessageReceived = false; + private volatile boolean counterMessageReceived = false; + private volatile boolean gaugeMessageReceived = false; + private volatile boolean stringMessageReceived = false; // Match enum public enum Match { do_not_match_0, @@ -195,21 +195,33 @@ " has reached or exceeded the threshold"); echo("\t\tDerived Gauge = " + n.getDerivedGauge()); echo("\t\tTrigger = " + n.getTrigger()); - counterMessageReceived = true; + + synchronized (this) { + counterMessageReceived = true; + notifyAll(); + } } else if (type.equals(MonitorNotification. THRESHOLD_HIGH_VALUE_EXCEEDED)) { echo("\t\t" + n.getObservedAttribute() + " has reached or exceeded the high threshold"); echo("\t\tDerived Gauge = " + n.getDerivedGauge()); echo("\t\tTrigger = " + n.getTrigger()); - gaugeMessageReceived = true; + + synchronized (this) { + gaugeMessageReceived = true; + notifyAll(); + } } else if (type.equals(MonitorNotification. STRING_TO_COMPARE_VALUE_MATCHED)) { echo("\t\t" + n.getObservedAttribute() + " matches the string-to-compare value"); echo("\t\tDerived Gauge = " + n.getDerivedGauge()); echo("\t\tTrigger = " + n.getTrigger()); - stringMessageReceived = true; + + synchronized (this) { + stringMessageReceived = true; + notifyAll(); + } } else { echo("\t\tSkipping notification of type: " + type); } @@ -358,6 +370,17 @@ // Check if notification was received // + synchronized (this) { + while (!counterMessageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } if (counterMessageReceived) { echo("\tOK: CounterMonitor notification received"); } else { @@ -525,6 +548,17 @@ // Check if notification was received // + synchronized (this) { + while (!gaugeMessageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } if (gaugeMessageReceived) { echo("\tOK: GaugeMonitor notification received"); } else { @@ -680,6 +714,17 @@ // Check if notification was received // + synchronized (this) { + while (!stringMessageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } if (stringMessageReceived) { echo("\tOK: StringMonitor notification received"); } else { --- ./jdk/test/javax/management/monitor/CounterMonitorTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/CounterMonitorTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ * @bug 4981829 * @summary Test that the counter monitor, when running in difference mode, * emits a notification every time the threshold is exceeded. - * @author Luis-Miguel Alventosa + * @author Luis-Miguel Alventosa, Shanliang JIANG * @run clean CounterMonitorTest * @run build CounterMonitorTest * @run main CounterMonitorTest @@ -43,9 +43,6 @@ // modulus number private Number modulus = new Integer(7); - // offset number - private int offset = 0; - // difference mode flag private boolean differenceModeFlag = true; @@ -53,26 +50,31 @@ private boolean notifyFlag = true; // granularity period - private int granularityperiod = 500; + private int granularityperiod = 10; - // counter values - private int[] values = new int[] {4, 6, 9, 11}; - - // time to wait for notification (in seconds) - private int timeout = 5; + // derived gauge + private volatile int derivedGauge = 2; // flag to notify that a message has been received private volatile boolean messageReceived = false; + private volatile Object observedValue = null; + // MBean class public class StdObservedObject implements StdObservedObjectMBean { public Object getNbObjects() { + echo(">>> StdObservedObject.getNbObjects: " + count); + synchronized(CounterMonitorTest.class) { + observedValue = count; + CounterMonitorTest.class.notifyAll(); + } return count; } public void setNbObjects(Object n) { + echo(">>> StdObservedObject.setNbObjects: " + n); count = n; } - private Object count= null; + private volatile Object count= null; } // MBean interface @@ -92,8 +94,9 @@ echo("\t\t" + n.getObservedAttribute() + " has reached or exceeded the threshold"); echo("\t\tDerived Gauge = " + n.getDerivedGauge()); - messageReceived = true; + synchronized (this) { + messageReceived = true; notifyAll(); } } else { @@ -171,18 +174,18 @@ Attribute attrib = new Attribute("NbObjects", data); server.setAttribute(stdObsObjName, attrib); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); + waitObservation(data); // Loop through the values // - for (int i = 0; i < values.length; i++) { - data = new Integer(values[i]); - echo(">>> Set data = " + data.intValue()); + while (derivedGauge++ < 10) { + System.out.print(">>> Set data from " + data.intValue()); + data = new Integer(data.intValue() + derivedGauge); + echo(" to " + data.intValue()); attrib = new Attribute("NbObjects", data); server.setAttribute(stdObsObjName, attrib); + waitObservation(data); echo("\tdoWait in Counter Monitor"); doWait(); @@ -205,21 +208,30 @@ } /* - * Wait until timeout reached + * Wait messageReceived to be true */ - void doWait() { - for (int i = 0; i < timeout; i++) { - echo("\tdoWait: Waiting for " + timeout + " seconds. " + - "i = " + i + ", messageReceived = " + messageReceived); - if (messageReceived) { + synchronized void doWait() { + while (!messageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); break; } - try { - synchronized (this) { - wait(1000); + } + } + + private void waitObservation(Object value) { + synchronized (CounterMonitorTest.class) { + while (value != observedValue) { + try { + CounterMonitorTest.class.wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; } - } catch (InterruptedException e) { - // OK: Ignore... 
} } } --- ./jdk/test/javax/management/monitor/GaugeMonitorDeadlockTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/GaugeMonitorDeadlockTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -36,8 +36,9 @@ */ import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; import java.util.concurrent.atomic.AtomicInteger; -import javax.management.Attribute; import javax.management.JMX; import javax.management.MBeanServer; import javax.management.Notification; @@ -47,10 +48,16 @@ import javax.management.monitor.GaugeMonitorMBean; public class GaugeMonitorDeadlockTest { + private static enum When {IN_GET_ATTRIBUTE, IN_NOTIFY}; + private static long checkingTime; public static void main(String[] args) throws Exception { if (args.length != 1) throw new Exception("Arg should be test number"); + double factor = Double.parseDouble(System.getProperty("test.timeout.factor", "1.0")); + checkingTime = (long)factor*1000; + System.out.println("=== checkingTime = " + checkingTime + "ms"); + int testNo = Integer.parseInt(args[0]) - 1; TestCase test = testCases[testNo]; System.out.println("Test: " + test.getDescription()); @@ -58,8 +65,6 @@ System.out.println("Test passed"); } - private static enum When {IN_GET_ATTRIBUTE, IN_NOTIFY}; - private static abstract class TestCase { TestCase(String description, When when) { this.description = description; @@ -98,16 +103,29 @@ monitorProxy.setNotifyLow(true); monitorProxy.start(); + System.out.println("=== Waiting observedProxy.getGetCount() to be " + + "changed, presumable deadlock if timeout?"); final int initGetCount = observedProxy.getGetCount(); - int getCount = initGetCount; - for (int i = 0; i < 2000; i++) { // 2000 * 10 = 20 seconds - getCount = observedProxy.getGetCount(); - if (getCount != initGetCount) - break; - Thread.sleep(10); + long checkedTime = System.currentTimeMillis(); + long nowTime; + ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); + while (observedProxy.getGetCount() == initGetCount) { + Thread.sleep(100); + + nowTime = System.currentTimeMillis(); + if (nowTime - checkedTime >= checkingTime) { + System.out.println("=== Checking deadlocked ..."); + if (threadMXBean.findDeadlockedThreads() != null) { + for (ThreadInfo info : threadMXBean.dumpAllThreads(true, true)) { + System.out.println(info); + } + throw new Error("Found deadlocked threads: " + + threadMXBean.findDeadlockedThreads().length); + } + checkedTime = System.currentTimeMillis(); + } } - if (getCount <= initGetCount) - throw new Exception("Test failed: presumable deadlock"); + // This won't show up as a deadlock in CTRL-\ or in // ThreadMXBean.findDeadlockedThreads(), because they don't // see that thread A is waiting for thread B (B.join()), and @@ -117,13 +135,13 @@ // so if we want to test notify behaviour we can trigger by // exceeding the threshold. 
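Like the DeadlockDetector in LogManagerAppContextDeadlock above, this test now polls ThreadMXBean instead of relying on a fixed timeout, scaling its check interval by the jtreg test.timeout.factor property. The watchdog idiom on its own, as a hedged sketch (class and method names are illustrative); the IN_NOTIFY branch of the test continues below:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class WatchdogSketch {
        // Poll for a deadlock cycle; dump the victims' stacks if one appears.
        static void watch(long pollMillis) throws InterruptedException {
            ThreadMXBean bean = ManagementFactory.getThreadMXBean();
            while (true) {
                long[] ids = bean.findDeadlockedThreads(); // null when no cycle exists
                if (ids != null && ids.length > 0) {
                    for (ThreadInfo info : bean.getThreadInfo(ids, Integer.MAX_VALUE)) {
                        System.err.println(info);
                    }
                    throw new IllegalStateException(ids.length + " deadlocked threads");
                }
                Thread.sleep(pollMillis);
            }
        }
    }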
if (when == When.IN_NOTIFY) { + final Thread testedThread = new Thread(sensitiveThing); final AtomicInteger notifCount = new AtomicInteger(); final NotificationListener listener = new NotificationListener() { public void handleNotification(Notification n, Object h) { - Thread t = new Thread(sensitiveThing); - t.start(); + testedThread.start(); try { - t.join(); + testedThread.join(); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -132,12 +150,36 @@ }; mbs.addNotificationListener(monitorName, listener, null, null); observedProxy.setThing(1000); - for (int i = 0; i < 2000 && notifCount.get() == 0; i++) - Thread.sleep(10); - if (notifCount.get() == 0) - throw new Exception("Test failed: presumable deadlock"); + System.out.println("=== Waiting notifications, presumable " + + "deadlock if timeout?"); + long startTime = System.currentTimeMillis(); + checkedTime = startTime; + while (notifCount.get() == 0) { + Thread.sleep(100); + + nowTime = System.currentTimeMillis(); + if (nowTime - checkedTime >= checkingTime) { + System.out.println("=== Checking the thread state ..."); + if (testedThread.isAlive()) { + System.out.println("=== Waiting testedThread to die " + + "after " + (nowTime - startTime) + "ms"); + + ThreadInfo tinfo = threadMXBean.getThreadInfo(testedThread.getId()); + if (Thread.State.BLOCKED.equals(tinfo.getThreadState())) { + for (ThreadInfo info : threadMXBean.dumpAllThreads(true, true)) { + System.out.println(info); + } + } else { + System.out.println(tinfo); + } + } else { + System.out.println("=== The testedThread is dead as wished, " + + "the test must be passed soon."); + } + checkedTime = System.currentTimeMillis(); + } + } } - } abstract void doSensitiveThing(GaugeMonitorMBean monitorProxy, --- ./jdk/test/javax/management/monitor/NonComparableAttributeValueTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/NonComparableAttributeValueTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -39,7 +39,7 @@ public class NonComparableAttributeValueTest implements NotificationListener { // Flag to notify that a message has been received - private boolean messageReceived = false; + private volatile boolean messageReceived = false; // MBean class public class ObservedObject implements ObservedObjectMBean { @@ -69,7 +69,11 @@ echo("\t\t" + n.getObservedAttribute() + " is null"); echo("\t\tDerived Gauge = " + n.getDerivedGauge()); echo("\t\tTrigger = " + n.getTrigger()); - messageReceived = true; + + synchronized (this) { + messageReceived = true; + notifyAll(); + } } else { echo("\t\tSkipping notification of type: " + type); } @@ -134,12 +138,9 @@ echo(">>> START the CounterMonitor"); counterMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: CounterMonitor notification received"); } else { @@ -212,12 +213,9 @@ echo(">>> START the GaugeMonitor"); gaugeMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: GaugeMonitor notification received"); } else { @@ -289,12 +287,9 @@ echo(">>> START the StringMonitor"); stringMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: StringMonitor 
notification received"); } else { @@ -334,6 +329,21 @@ } /* + * Wait messageReceived to be true + */ + synchronized void doWait() { + while (!messageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } + + /* * Standalone entry point. * * Run the test and report to stdout. --- ./jdk/test/javax/management/monitor/ReflectionExceptionTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/ReflectionExceptionTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -87,7 +87,11 @@ echo("\tObservedAttribute: " + mn.getObservedAttribute()); echo("\tDerivedGauge: " + mn.getDerivedGauge()); echo("\tTrigger: " + mn.getTrigger()); - messageReceived = true; + + synchronized (this) { + messageReceived = true; + notifyAll(); + } } } } @@ -135,12 +139,9 @@ echo(">>> START the CounterMonitor"); counterMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: CounterMonitor got RUNTIME_ERROR notification!"); } else { @@ -203,12 +204,9 @@ echo(">>> START the GaugeMonitor"); gaugeMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: GaugeMonitor got RUNTIME_ERROR notification!"); } else { @@ -270,12 +268,9 @@ echo(">>> START the StringMonitor"); stringMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: StringMonitor got RUNTIME_ERROR notification!"); } else { @@ -349,8 +344,23 @@ } } + /* + * Wait messageReceived to be true + */ + synchronized void doWait() { + while (!messageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } + // Flag to notify that a message has been received - private boolean messageReceived = false; + private volatile boolean messageReceived = false; private MBeanServer server; private ObjectName obsObjName; --- ./jdk/test/javax/management/monitor/RuntimeExceptionTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/monitor/RuntimeExceptionTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -86,7 +86,11 @@ echo("\tObservedAttribute: " + mn.getObservedAttribute()); echo("\tDerivedGauge: " + mn.getDerivedGauge()); echo("\tTrigger: " + mn.getTrigger()); - messageReceived = true; + + synchronized (this) { + messageReceived = true; + notifyAll(); + } } } } @@ -134,12 +138,9 @@ echo(">>> START the CounterMonitor"); counterMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: CounterMonitor got RUNTIME_ERROR notification!"); } else { @@ -202,12 +203,9 @@ echo(">>> START the GaugeMonitor"); gaugeMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: GaugeMonitor got RUNTIME_ERROR notification!"); } else { @@ -269,12 +267,9 @@ echo(">>> START the StringMonitor"); 
stringMonitor.start(); - // Wait for granularity period (multiplied by 2 for sure) - // - Thread.sleep(granularityperiod * 2); - // Check if notification was received // + doWait(); if (messageReceived) { echo("\tOK: StringMonitor got RUNTIME_ERROR notification!"); } else { @@ -347,8 +342,23 @@ } } + /* + * Wait messageReceived to be true + */ + synchronized void doWait() { + while (!messageReceived) { + try { + wait(); + } catch (InterruptedException e) { + System.err.println("Got unexpected exception: " + e); + e.printStackTrace(); + break; + } + } + } + // Flag to notify that a message has been received - private boolean messageReceived = false; + private volatile boolean messageReceived = false; private MBeanServer server; private ObjectName obsObjName; --- ./jdk/test/javax/management/mxbean/MXBeanFallbackTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/mxbean/MXBeanFallbackTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -30,7 +30,7 @@ * @author Jaroslav Bachorik * @run clean MXBeanFallbackTest * @run build MXBeanFallbackTest - * @run main MXBeanFallbackTest + * @run main/othervm -Djdk.jmx.mbeans.allowNonPublic=true MXBeanFallbackTest */ import javax.management.MBeanServer; @@ -40,7 +40,6 @@ public class MXBeanFallbackTest { public static void main(String[] args) throws Exception { - System.setProperty("jdk.jmx.mbeans.allowNonPublic", "true"); testPrivateMXBean("Private", new Private()); if (failures == 0) --- ./jdk/test/javax/management/proxy/JMXProxyFallbackTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/management/proxy/JMXProxyFallbackTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -36,7 +36,7 @@ * @author Jaroslav Bachorik * @run clean JMXProxyFallbackTest * @run build JMXProxyFallbackTest - * @run main JMXProxyFallbackTest + * @run main/othervm -Djdk.jmx.mbeans.allowNonPublic=true JMXProxyFallbackTest */ public class JMXProxyFallbackTest { private static interface PrivateMBean { @@ -56,7 +56,6 @@ private static int failures = 0; public static void main(String[] args) throws Exception { - System.setProperty("jdk.jmx.mbeans.allowNonPublic", "true"); testPrivate(PrivateMBean.class); testPrivate(PrivateMXBean.class); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/net/ssl/TLSv12/ProtocolFilter.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +// SunJSSE does not support dynamic system properties, no way to re-use +// system properties in samevm/agentvm mode. 
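The test that follows, ProtocolFilter (bug 8052406), drives this through a real handshake; the enabled and supported protocol lists themselves can be examined without connecting at all. A hedged sketch (no server required, the socket stays unconnected; the class name is illustrative):

    import java.util.Arrays;
    import javax.net.ssl.SSLSocket;
    import javax.net.ssl.SSLSocketFactory;

    public class ProtocolListSketch {
        public static void main(String[] args) throws Exception {
            // An unconnected SSLSocket is enough to query and set protocol lists.
            SSLSocket socket = (SSLSocket) SSLSocketFactory.getDefault().createSocket();
            System.out.println("supported: " + Arrays.toString(socket.getSupportedProtocols()));
            System.out.println("enabled:   " + Arrays.toString(socket.getEnabledProtocols()));
            // 8052406 concerns this round-trip: everything supported should stay
            // enabled, with SSLv2Hello not silently filtered out of the list.
            socket.setEnabledProtocols(socket.getSupportedProtocols());
            System.out.println("after set: " + Arrays.toString(socket.getEnabledProtocols()));
            socket.close();
        }
    }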
+ +/* + * @test + * @bug 8052406 + * @summary SSLv2Hello protocol may be filtered out unexpectedly + * @run main/othervm ProtocolFilter + */ + +import java.io.*; +import java.net.*; +import javax.net.ssl.*; + +public class ProtocolFilter { + + /* + * ============================================================= + * Set the various variables needed for the tests, then + * specify what tests to run on each side. + */ + + /* + * Should we run the client or server in a separate thread? + * Both sides can throw exceptions, but do you have a preference + * as to which side should be the main thread? + */ + static boolean separateServerThread = false; + + /* + * Where do we find the keystores? + */ + static String pathToStores = "../../../../sun/security/ssl/etc"; + static String keyStoreFile = "keystore"; + static String trustStoreFile = "truststore"; + static String passwd = "passphrase"; + + /* + * Is the server ready to serve? + */ + volatile static boolean serverReady = false; + + /* + * Turn on SSL debugging? + */ + static boolean debug = false; + + /* + * If the client or server is doing some kind of object creation + * that the other side depends on, and that thread prematurely + * exits, you may experience a hang. The test harness will + * terminate all hung threads after its timeout has expired, + * currently 3 minutes by default, but you might try to be + * smart about it.... + */ + + /* + * Define the server side of the test. + * + * If the server prematurely exits, serverReady will be set to true + * to avoid infinite hangs. + */ + void doServerSide() throws Exception { + SSLServerSocketFactory sslssf = + (SSLServerSocketFactory) SSLServerSocketFactory.getDefault(); + SSLServerSocket sslServerSocket = + (SSLServerSocket) sslssf.createServerSocket(serverPort); + + // Only enable cipher suites for TLS v1.2. + sslServerSocket.setEnabledCipherSuites( + new String[]{"TLS_RSA_WITH_AES_128_CBC_SHA256"}); + + serverPort = sslServerSocket.getLocalPort(); + + /* + * Signal the client: we're ready for its connect. + */ + serverReady = true; + + SSLSocket sslSocket = (SSLSocket) sslServerSocket.accept(); + InputStream sslIS = sslSocket.getInputStream(); + OutputStream sslOS = sslSocket.getOutputStream(); + + sslIS.read(); + sslOS.write(85); + sslOS.flush(); + + sslSocket.close(); + } + + /* + * Define the client side of the test. + * + * If the server prematurely exits, serverReady will be set to true + * to avoid infinite hangs. + */ + void doClientSide() throws Exception { + + /* + * Wait for server to get started. + */ + while (!serverReady) { + Thread.sleep(50); + } + + SSLSocketFactory sslsf = + (SSLSocketFactory) SSLSocketFactory.getDefault(); + SSLSocket sslSocket = (SSLSocket) + sslsf.createSocket("localhost", serverPort); + + // Enable all supported protocols, including SSLv2Hello.
+ sslSocket.setEnabledProtocols(sslSocket.getSupportedProtocols()); + + InputStream sslIS = sslSocket.getInputStream(); + OutputStream sslOS = sslSocket.getOutputStream(); + + sslOS.write(280); + sslOS.flush(); + sslIS.read(); + + sslSocket.close(); + } + + /* + * ============================================================= + * The remainder is just support stuff + */ + + // use any free port by default + volatile int serverPort = 0; + + volatile Exception serverException = null; + volatile Exception clientException = null; + + public static void main(String[] args) throws Exception { + String keyFilename = + System.getProperty("test.src", ".") + "/" + pathToStores + + "/" + keyStoreFile; + String trustFilename = + System.getProperty("test.src", ".") + "/" + pathToStores + + "/" + trustStoreFile; + + System.setProperty("javax.net.ssl.keyStore", keyFilename); + System.setProperty("javax.net.ssl.keyStorePassword", passwd); + System.setProperty("javax.net.ssl.trustStore", trustFilename); + System.setProperty("javax.net.ssl.trustStorePassword", passwd); + + if (debug) + System.setProperty("javax.net.debug", "all"); + + /* + * Start the tests. + */ + new ProtocolFilter(); + } + + Thread clientThread = null; + Thread serverThread = null; + + /* + * Primary constructor, used to drive remainder of the test. + * + * Fork off the other side, then do your work. + */ + ProtocolFilter() throws Exception { + Exception startException = null; + try { + if (separateServerThread) { + startServer(true); + startClient(false); + } else { + startClient(true); + startServer(false); + } + } catch (Exception e) { + startException = e; + } + + /* + * Wait for other side to close down. + */ + if (separateServerThread) { + if (serverThread != null) { + serverThread.join(); + } + } else { + if (clientThread != null) { + clientThread.join(); + } + } + + /* + * When we get here, the test is pretty much over. + * Which side threw the error? + */ + Exception local; + Exception remote; + + if (separateServerThread) { + remote = serverException; + local = clientException; + } else { + remote = clientException; + local = serverException; + } + + Exception exception = null; + + /* + * Check various exception conditions. + */ + if ((local != null) && (remote != null)) { + // If both failed, return the current thread's exception. + local.initCause(remote); + exception = local; + } else if (local != null) { + exception = local; + } else if (remote != null) { + exception = remote; + } else if (startException != null) { + exception = startException; + } + + /* + * If there was an exception *AND* a startException, + * add it as a suppressed exception. + */ + if (exception != null) { + if (exception != startException && startException != null) { + exception.addSuppressed(startException); + } + throw exception; + } + + // Fall-through: no exception to throw! + } + + void startServer(boolean newThread) throws Exception { + if (newThread) { + serverThread = new Thread() { + public void run() { + try { + doServerSide(); + } catch (Exception e) { + /* + * Our server thread just died. + * + * Release the client, if not active already...
+ */ + System.err.println("Server died..."); + serverReady = true; + serverException = e; + } + } + }; + serverThread.start(); + } else { + try { + doServerSide(); + } catch (Exception e) { + serverException = e; + } finally { + serverReady = true; + } + } + } + + void startClient(boolean newThread) throws Exception { + if (newThread) { + clientThread = new Thread() { + public void run() { + try { + doClientSide(); + } catch (Exception e) { + /* + * Our client thread just died. + */ + System.err.println("Client died..."); + clientException = e; + } + } + }; + clientThread.start(); + } else { + try { + doClientSide(); + } catch (Exception e) { + clientException = e; + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/DataTransfer/8059739/bug8059739.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
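A side note on the ProtocolFilter test above: SSLv2Hello is not a protocol version but a hello-message format that SunJSSE lists among the supported protocols, and bug 8052406 is about it being dropped from the enabled set unexpectedly. A small sketch, assuming only standard JSSE APIs (the class name ShowProtocols is illustrative), of how a client can inspect and widen its enabled protocol set the way doClientSide() does:

    import java.util.Arrays;
    import javax.net.ssl.SSLSocket;
    import javax.net.ssl.SSLSocketFactory;

    public class ShowProtocols {
        public static void main(String[] args) throws Exception {
            SSLSocketFactory factory =
                    (SSLSocketFactory) SSLSocketFactory.getDefault();
            // An unconnected socket is enough to query the provider.
            try (SSLSocket socket = (SSLSocket) factory.createSocket()) {
                System.out.println("Supported: "
                        + Arrays.toString(socket.getSupportedProtocols()));
                System.out.println("Enabled:   "
                        + Arrays.toString(socket.getEnabledProtocols()));
                // Same move as the test client: enable everything the
                // provider supports, which includes SSLv2Hello on JDK 8.
                socket.setEnabledProtocols(socket.getSupportedProtocols());
            }
        }
    }

The server side of the test, by contrast, constrains only the cipher suite (TLS_RSA_WITH_AES_128_CBC_SHA256), so the handshake is expected to succeed even when the client offers the full protocol list.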
+ */ + +/* @test + @bug 8059739 + @summary Dragged and Dropped data is corrupted for two data types + @author Anton Nashatyrev +*/ + +import javax.swing.*; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.DataFlavor; +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; + +public class bug8059739 { + + private static boolean passed = true; + + public static void main(String[] args) throws Exception { + SwingUtilities.invokeAndWait(new Runnable() { + @Override + public void run() { + try { + runTest(); + } catch (Exception e) { + e.printStackTrace(); + passed = false; + } + } + }); + + if (!passed) { + throw new RuntimeException("Test FAILED."); + } else { + System.out.println("Passed."); + } + } + + private static void runTest() throws Exception { + String testString = "my string"; + JTextField tf = new JTextField(testString); + tf.selectAll(); + Clipboard clipboard = new Clipboard("clip"); + tf.getTransferHandler().exportToClipboard(tf, clipboard, TransferHandler.COPY); + DataFlavor[] dfs = clipboard.getAvailableDataFlavors(); + for (DataFlavor df: dfs) { + String charset = df.getParameter("charset"); + if (InputStream.class.isAssignableFrom(df.getRepresentationClass()) && + charset != null) { + BufferedReader br = new BufferedReader(new InputStreamReader( + (InputStream) clipboard.getData(df), charset)); + String s = br.readLine(); + System.out.println("Content: '" + s + "'"); + passed &= s.contains(testString); + } + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JColorChooser/8065098/JColorChooserDnDTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +import javax.swing.BorderFactory; +import javax.swing.JColorChooser; +import javax.swing.JFrame; +import javax.swing.JPanel; +import javax.swing.SwingUtilities; + +/* + * @bug 8065098 + * @summary JColorChooser no longer supports drag and drop + * between two JVM instances + */ +public class JColorChooserDnDTest { + + public static void main(String[] args) { + SwingUtilities.invokeLater(new Runnable() { + + @Override + public void run() { + JFrame frame = new JFrame(); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + JPanel panel = new JPanel(); + JColorChooser colorChooser = new JColorChooser(); + colorChooser.setDragEnabled(true); + panel.setBorder(BorderFactory.createTitledBorder("JColorChoosers")); + panel.add(colorChooser); + frame.setContentPane(panel); + frame.pack(); + frame.setVisible(true); + } + }); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JColorChooser/8065098/bug8065098.html Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,40 @@ + + + + 1. Compile the java test JColorChooserDnDTest.java: + > <path to JDK>/bin/javac JColorChooserDnDTest.java + 2. Run the first instance of the java test: + > <path to JDK>/bin/java JColorChooserDnDTest + 3. Select a color in the color chooser + 4. Run the second instance of the java test: + > <path to JDK>/bin/java JColorChooserDnDTest + 5. Drag and drop the selected color from the first color chooser + preview panel to the second color chooser preview panel + 6. If the color is dragged to the second color chooser then the test passes. + + + + + \ No newline at end of file --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JColorChooser/8065098/bug8065098.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +import javax.swing.JApplet; + +/* + * @test + * @bug 8065098 + * @summary JColorChooser no longer supports drag and drop + * between two JVM instances + * @run applet/manual=yesno bug8065098.html + */ +public class bug8065098 extends JApplet { + +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JComboBox/8057893/bug8057893.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.EventQueue; +import java.awt.Robot; +import java.awt.Toolkit; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.KeyEvent; +import javax.swing.JComboBox; +import javax.swing.JFrame; +import javax.swing.WindowConstants; +import sun.awt.SunToolkit; + +/** + * @test + * @bug 8057893 + * @author Alexander Scherbatiy + * @summary JComboBox actionListener never receives "comboBoxEdited" + * from getActionCommand + * @run main bug8057893 + */ +public class bug8057893 { + + private static volatile boolean isComboBoxEdited = false; + + public static void main(String[] args) throws Exception { + Robot robot = new Robot(); + robot.setAutoDelay(50); + SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit(); + + EventQueue.invokeAndWait(() -> { + JFrame frame = new JFrame(); + frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); + JComboBox<String> comboBox = new JComboBox<>(new String[]{"one", "two"}); + comboBox.setEditable(true); + comboBox.addActionListener(new ActionListener() { + + @Override + public void actionPerformed(ActionEvent e) { + if ("comboBoxEdited".equals(e.getActionCommand())) { + isComboBoxEdited = true; + } + } + }); + frame.add(comboBox); + frame.pack(); + frame.setVisible(true); + comboBox.requestFocusInWindow(); + }); + + toolkit.realSync(); + + robot.keyPress(KeyEvent.VK_A); + robot.keyRelease(KeyEvent.VK_A); + robot.keyPress(KeyEvent.VK_ENTER); + robot.keyRelease(KeyEvent.VK_ENTER); + toolkit.realSync(); + + if (!isComboBoxEdited) { + throw new RuntimeException("ComboBoxEdited event is not fired!"); + } + } +} --- ./jdk/test/javax/swing/JComboBox/ConsumedEscTest/ConsumedEscTest.java Mon Dec 08 12:29:42 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -import javax.swing.*; -import java.awt.event.ActionEvent; -import java.awt.event.KeyEvent; -import java.awt.Robot; -import java.awt.Toolkit; -import sun.awt.SunToolkit; - -/* - @test - @bug 8031485 - @summary Combo box consuming escape and enter key events - @author Petr Pchelko - @run main ConsumedEscTest -*/ -public class ConsumedEscTest { - private static volatile JFrame frame; - private static volatile boolean passed = false; - - public static void main(String... args) throws Exception { - try { - SwingUtilities.invokeAndWait(() -> { - frame = new JFrame(); - JComboBox combo = new JComboBox<>(new String[]{"one", "two", "three"}); - JPanel panel = new JPanel(); - panel.add(combo); - combo.requestFocusInWindow(); - frame.setBounds(100, 150, 300, 100); - addAction(panel); - frame.add(panel); - frame.setVisible(true); - }); - - Robot robot = new Robot(); - robot.waitForIdle(); - ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); - robot.keyPress(KeyEvent.VK_ESCAPE); - robot.waitForIdle(); - ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); - robot.keyRelease(KeyEvent.VK_ESCAPE); - robot.waitForIdle(); - ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); - if (!passed) { - throw new RuntimeException("FAILED: ESC was consumed by combo box"); - } - } finally { - if (frame != null) { - frame.dispose(); - } - } - } - - private static void addAction(JComponent comp) { - KeyStroke k = KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0); - Object actionKey = "cancel"; - comp.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(k, actionKey); - Action cancelAction = new AbstractAction() { - @Override - public void actionPerformed(ActionEvent ev) { - passed = true; - } - }; - comp.getActionMap().put(actionKey, cancelAction); - } - -} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JComboBox/ConsumedKeyTest/ConsumedKeyTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import javax.swing.*; +import java.awt.event.ActionEvent; +import java.awt.event.KeyEvent; +import java.awt.Toolkit; +import java.awt.Robot; +import sun.awt.SunToolkit; + +/* + @test + @bug 8031485 8058193 + @summary Combo box consuming escape and enter key events + @author Petr Pchelko + @run main ConsumedKeyTest +*/ +public class ConsumedKeyTest { + private static volatile JFrame frame; + private static volatile boolean passed; + + public static void main(String... args) throws Exception { + test(KeyEvent.VK_ESCAPE); + test(KeyEvent.VK_ENTER); + } + + private static void test(final int key) throws Exception { + passed = false; + try { + SwingUtilities.invokeAndWait(() -> { + frame = new JFrame(); + JComboBox<String> combo = new JComboBox<>(new String[]{"one", "two", "three"}); + JPanel panel = new JPanel(); + panel.add(combo); + combo.requestFocusInWindow(); + frame.setBounds(100, 150, 300, 100); + addAction(panel, key); + frame.add(panel); + frame.setVisible(true); + }); + + Robot robot = new Robot(); + robot.waitForIdle(); + ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); + robot.keyPress(key); + robot.waitForIdle(); + ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); + robot.keyRelease(key); + robot.waitForIdle(); + ((SunToolkit)Toolkit.getDefaultToolkit()).realSync(); + if (!passed) { + throw new RuntimeException("FAILED: " + KeyEvent.getKeyText(key) + " was consumed by combo box"); + } + } finally { + if (frame != null) { + frame.dispose(); + } + } + + } + + private static void addAction(JComponent comp, final int key) { + KeyStroke k = KeyStroke.getKeyStroke(key, 0); + Object actionKey = "cancel"; + comp.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(k, actionKey); + Action cancelAction = new AbstractAction() { + @Override + public void actionPerformed(ActionEvent ev) { + passed = true; + } + }; + comp.getActionMap().put(actionKey, cancelAction); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JFileChooser/8046391/bug8046391.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/* + * @test + * @bug 8046391 + * @summary JFileChooser hangs if displayed in Windows L&F + * @author Alexey Ivanov + * @run main/othervm/timeout=10 bug8046391 +*/ + +import com.sun.java.swing.plaf.windows.WindowsLookAndFeel; +import sun.awt.OSInfo; +import sun.awt.OSInfo.OSType; + +import javax.swing.JFileChooser; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.UnsupportedLookAndFeelException; + +public class bug8046391 { + + public static void main(String[] args) throws Exception { + OSType type = OSInfo.getOSType(); + if (type != OSType.WINDOWS) { + System.out.println("This test is for Windows only... skipping!"); + return; + } + + SwingUtilities.invokeAndWait(() -> { + try { + UIManager.setLookAndFeel(new WindowsLookAndFeel()); + } catch (UnsupportedLookAndFeelException e) { + e.printStackTrace(); + } + System.out.println("Creating JFileChooser..."); + JFileChooser fileChooser = new JFileChooser(); + System.out.println("Test passed: chooser = " + fileChooser); + }); + // Test fails if creating JFileChooser hangs + } + +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JFileChooser/FileFilterDescription/FileFilterDescription.html Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,40 @@ + + + + + + + +Follow the instructions below. +1) Check that current filter in the opened JFileChooser is a "CustomFileFilter". +2) Close the JFileChooser. +3) Test will repeat steps 1 - 2 for all supported look and feels. +4) If it's true for all look and feels then the test passed, otherwise it failed. + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JFileChooser/FileFilterDescription/FileFilterDescription.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.applet.Applet; +import java.io.File; + +import javax.swing.JFileChooser; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.UnsupportedLookAndFeelException; +import javax.swing.filechooser.FileFilter; + +public final class FileFilterDescription extends Applet { + + @Override + public void init() { + } + + @Override + public void start() { + try { + test(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + + public static void test() throws Exception { + final UIManager.LookAndFeelInfo[] infos = UIManager + .getInstalledLookAndFeels(); + for (final UIManager.LookAndFeelInfo info : infos) { + SwingUtilities.invokeAndWait(() -> { + final JFileChooser chooser = new JFileChooser(); + setLookAndFeel(info); + chooser.setAcceptAllFileFilterUsed(false); + chooser.setFileFilter(new CustomFileFilter()); + SwingUtilities.updateComponentTreeUI(chooser); + chooser.showDialog(null, "Open"); + }); + } + } + + private static void setLookAndFeel(final UIManager.LookAndFeelInfo info) { + try { + UIManager.setLookAndFeel(info.getClassName()); + } catch (ClassNotFoundException | InstantiationException | + UnsupportedLookAndFeelException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static class CustomFileFilter extends FileFilter { + + @Override + public boolean accept(final File f) { + return false; + } + + @Override + public String getDescription() { + return "CustomFileFilter"; + } + } +} \ No newline at end of file --- ./jdk/test/javax/swing/JMenuItem/8031573/bug8031573.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/swing/JMenuItem/8031573/bug8031573.java Wed Feb 04 12:14:43 2015 -0800 @@ -28,7 +28,7 @@ import javax.swing.SwingUtilities; /* @test - * @bug 8031573 + * @bug 8031573 8040279 * @summary [macosx] Checkmarks of JCheckBoxMenuItems aren't rendered * in high resolution on Retina * @author Alexander Scherbatiy --- ./jdk/test/javax/swing/JOptionPane/8024926/bug8024926.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/javax/swing/JOptionPane/8024926/bug8024926.java Wed Feb 04 12:14:43 2015 -0800 @@ -31,7 +31,7 @@ /** * @test - * @bug 8024926 + * @bug 8024926 8040279 * @summary [macosx] AquaIcon HiDPI support * @author Alexander Scherbatiy * @run applet/manual=yesno bug8024926.html --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JRadioButton/8033699/bug8033699.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + /* + * @test + * @library ../../regtesthelpers + * @build Util + * @bug 8033699 + * @summary Incorrect radio button behavior when pressing tab key + * @author Vivi An + * @run main bug8033699 + */ + +import javax.swing.*; +import javax.swing.event.*; +import java.awt.event.*; +import java.awt.*; +import sun.awt.SunToolkit; + +public class bug8033699 { + private static Robot robot; + private static SunToolkit toolkit; + + private static JButton btnStart; + private static ButtonGroup btnGrp; + private static JButton btnEnd; + private static JButton btnMiddle; + private static JRadioButton radioBtn1; + private static JRadioButton radioBtn2; + private static JRadioButton radioBtn3; + private static JRadioButton radioBtnSingle; + + public static void main(String args[]) throws Throwable { + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + createAndShowGUI(); + } + }); + + robot = new Robot(); + Thread.sleep(100); + + robot.setAutoDelay(100); + toolkit = (SunToolkit) Toolkit.getDefaultToolkit(); + + // tab key test grouped radio button + runTest1(); + + // tab key test non-grouped radio button + runTest2(); + + // shift tab key test grouped and non grouped radio button + runTest3(); + + // left/up key test in grouped radio button + runTest4(); + + // down/right key test in grouped radio button + runTest5(); + + // tab from radio button in group to next component in the middle of button group layout + runTest6(); + + // tab to radio button in group from component in the middle of button group layout + runTest7(); + + // down key circle back to first button in grouped radio button + runTest8(); + } + + private static void createAndShowGUI() { + JFrame mainFrame = new JFrame("Bug 8033699 - 8 Tests for Grouped/Non Group Radio Buttons"); + + btnStart = new JButton("Start"); + btnEnd = new JButton("End"); + btnMiddle = new JButton("Middle"); + + JPanel box = new JPanel(); + box.setLayout(new BoxLayout(box, BoxLayout.Y_AXIS)); + box.setBorder(BorderFactory.createTitledBorder("Grouped Radio Buttons")); + radioBtn1 = new JRadioButton("A"); + radioBtn2 = new JRadioButton("B"); + radioBtn3 = new JRadioButton("C"); + + ButtonGroup btnGrp = new ButtonGroup(); + btnGrp.add(radioBtn1); + btnGrp.add(radioBtn2); + btnGrp.add(radioBtn3); + radioBtn1.setSelected(true); + + box.add(radioBtn1); + box.add(radioBtn2); + box.add(btnMiddle); + box.add(radioBtn3); + + radioBtnSingle = new JRadioButton("Not Grouped"); + radioBtnSingle.setSelected(true); + + mainFrame.getContentPane().add(btnStart); + mainFrame.getContentPane().add(box); + mainFrame.getContentPane().add(radioBtnSingle); + mainFrame.getContentPane().add(btnEnd); + + mainFrame.getRootPane().setDefaultButton(btnStart); + btnStart.requestFocus(); + + mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + mainFrame.setLayout(new BoxLayout(mainFrame.getContentPane(), BoxLayout.Y_AXIS)); + + mainFrame.setSize(300, 300); + mainFrame.setLocation(200, 200); + mainFrame.setVisible(true); + mainFrame.toFront(); + } + + // Radio button Group as a single component when traversing through tab key + private static void runTest1() throws Exception{ + hitKey(robot, KeyEvent.VK_TAB); + hitKey(robot, KeyEvent.VK_TAB); + + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != 
radioBtnSingle) { + System.out.println("Radio Button Group Go To Next Component through Tab Key failed"); + throw new RuntimeException("Focus is not on Radio Button Single as Expected"); + } + } + }); + } + + // Non-Grouped Radio button as a single component when traversing through tab key + private static void runTest2() throws Exception { + hitKey(robot, KeyEvent.VK_TAB); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != btnEnd) { + System.out.println("Non Grouped Radio Button Go To Next Component through Tab Key failed"); + throw new RuntimeException("Focus is not on Button End as Expected"); + } + } + }); + } + + // Non-Grouped Radio button and Group Radio button as a single component when traversing through shift-tab key + private static void runTest3() throws Exception { + hitKey(robot, KeyEvent.VK_SHIFT, KeyEvent.VK_TAB); + hitKey(robot, KeyEvent.VK_SHIFT, KeyEvent.VK_TAB); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != radioBtn3) { + System.out.println("Radio button Group/Non Grouped Radio Button SHIFT-Tab Key Test failed"); + throw new RuntimeException("Focus is not on Radio Button C as Expected"); + } + } + }); + } + + // Using arrow key to move focus in radio button group + private static void runTest4() throws Exception { + hitKey(robot, KeyEvent.VK_UP); + hitKey(robot, KeyEvent.VK_LEFT); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != radioBtn1) { + System.out.println("Radio button Group UP/LEFT Arrow Key Move Focus Failed"); + throw new RuntimeException("Focus is not on Radio Button A as Expected"); + } + } + }); + } + + private static void runTest5() throws Exception { + hitKey(robot, KeyEvent.VK_DOWN); + hitKey(robot, KeyEvent.VK_RIGHT); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != radioBtn3) { + System.out.println("Radio button Group DOWN/RIGHT Arrow Key Move Focus Failed"); + throw new RuntimeException("Focus is not on Radio Button C as Expected"); + } + } + }); + } + + private static void runTest6() throws Exception { + hitKey(robot, KeyEvent.VK_DOWN); + hitKey(robot, KeyEvent.VK_DOWN); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != radioBtn2) { + System.out.println("Radio button Group Circle Back To First Button Test failed"); + throw new RuntimeException("Focus is not on Radio Button B as Expected"); + } + } + }); + } + + private static void runTest7() throws Exception { + hitKey(robot, KeyEvent.VK_TAB); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != btnMiddle) { + System.out.println("Tab to Separate Component added in button group layout failed"); + throw new RuntimeException("Focus is not on Middle Button as Expected"); + } + } + }); + } + + private static void runTest8() throws Exception { + hitKey(robot, KeyEvent.VK_TAB); + SwingUtilities.invokeAndWait(new Runnable() { + public void run() { + if (KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner() != radioBtn3) { + System.out.println("Tab from Separate Component back into button group failed"); + throw new RuntimeException("Focus is not on Radio Button C as Expected"); + } + } + }); + } + + private static void hitKey(Robot robot, int keycode) { + robot.keyPress(keycode); + robot.keyRelease(keycode); + toolkit.realSync(); + } + + private static void hitKey(Robot robot, int mode, int keycode) { + robot.keyPress(mode); + robot.keyPress(keycode); + robot.keyRelease(mode); + robot.keyRelease(keycode); + toolkit.realSync(); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/JTabbedPane/7170310/bug7170310.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Component; +import java.awt.Dimension; +import java.awt.Rectangle; +import java.awt.Toolkit; +import javax.swing.JComponent; +import javax.swing.JFrame; +import javax.swing.JPanel; +import javax.swing.JTabbedPane; +import javax.swing.JViewport; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.plaf.metal.MetalLookAndFeel; + +import sun.awt.SunToolkit; + +/** + * @test + * @bug 7170310 + * @author Alexey Ivanov + * @summary Selected tab should be scrolled into view.
+ * @run main bug7170310 + */ +public class bug7170310 { + private static final int TABS_NUMBER = 3; + + private static volatile JTabbedPane tabbedPane; + private static volatile int count = 1; + + private static volatile JFrame frame; + + private static volatile Exception exception = null; + + public static void main(String[] args) throws Exception { + try { + UIManager.setLookAndFeel(new MetalLookAndFeel()); + SwingUtilities.invokeAndWait(bug7170310::createAndShowUI); + + SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit(); + toolkit.realSync(); + + for (int i = 0; i < TABS_NUMBER; i++) { + SwingUtilities.invokeAndWait(bug7170310::addTab); + toolkit.realSync(); + } + + SwingUtilities.invokeAndWait(bug7170310::check); + + if (exception != null) { + System.out.println("Test failed: " + exception.getMessage()); + throw exception; + } else { + System.out.printf("Test passed"); + } + } finally { + frame.dispose(); + } + } + + private static void createAndShowUI() { + frame = new JFrame("bug7170310"); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setSize(200, 100); + + tabbedPane = new JTabbedPane(); + tabbedPane.addTab("Main Tab", new JPanel()); + + tabbedPane.setTabLayoutPolicy(JTabbedPane.SCROLL_TAB_LAYOUT); + + frame.getContentPane().add(tabbedPane); + frame.setVisible(true); + } + + private static void addTab() { + tabbedPane.addTab("Added Tab " + count++, new JPanel()); + tabbedPane.setSelectedIndex(tabbedPane.getTabCount() - 1); + } + + private static void check() { + try { + JViewport vp = null; + for (Component c : tabbedPane.getComponents()) { + if (c instanceof JViewport) { + vp = (JViewport) c; + break; + } + } + + JComponent v = (JComponent) vp.getView(); + Rectangle vr = vp.getViewRect(); + Dimension vs = v.getSize(); + + // The tab view must be scrolled to the end so that the last tab is visible + if (vs.width != (vr.x + vr.width)) { + throw new RuntimeException("tabScroller.tabPanel view is positioned incorrectly: " + + vs.width + " vs " + (vr.x + vr.width)); + } + } catch (Exception e) { + exception = e; + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/text/View/8048110/bug8048110.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* @test + * @bug 8048110 + * @summary Using tables in JTextPane leads to infinite loop in FlowLayout.layoutRow + * @author Dmitry Markov + * @run main bug8048110 + */ + +import sun.awt.SunToolkit; + +import javax.swing.*; +import javax.swing.text.Element; +import javax.swing.text.html.HTMLDocument; +import javax.swing.text.html.HTMLEditorKit; +import java.awt.*; + +public class bug8048110 { + private static SunToolkit toolkit = (SunToolkit)Toolkit.getDefaultToolkit(); + private static Object lock = new Object(); + private static boolean isRealSyncPerformed = false; + private static final String htmlText = "<table border=\"1\">" + + "<tr><th>Devices</th><th>State</th></tr>" + + "<tr><td>PC</td><td>Ok</td></tr>" + + "</table>";
+ + public static void main(String[] args) throws Exception { + SwingUtilities.invokeAndWait(new Runnable() { + @Override + public void run() { + createAndShowGUI(); + } + }); + + Thread thread = new Thread() { + @Override + public void run() { + toolkit.realSync(); + synchronized (lock) { + isRealSyncPerformed = true; + lock.notifyAll(); + } + } + }; + thread.start(); + + synchronized (lock) { + if (!isRealSyncPerformed) { + lock.wait(5000); + } + } + + if (!isRealSyncPerformed) { + throw new RuntimeException("Test Failed!"); + } + } + + private static void createAndShowGUI() { + try { + UIManager.setLookAndFeel("javax.swing.plaf.metal.MetalLookAndFeel"); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + HTMLEditorKit editorKit = new HTMLEditorKit(); + JTextPane textPane = new JTextPane(); + textPane.setContentType("text/html"); + textPane.setEditorKit(editorKit); + textPane.setText("Initial text without table"); + + JFrame frame = new JFrame("bug8048110"); + frame.getContentPane().add(textPane, BorderLayout.CENTER); + frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); + frame.setSize(500, 200); + frame.setVisible(true); + + textPane.setDocument(textPane.getEditorKit().createDefaultDocument()); + HTMLDocument htmlDocument = (HTMLDocument) textPane.getDocument(); + Element firstParagraph = findFirstElement(textPane.getDocument().getDefaultRootElement(), "p"); + + try { + htmlDocument.setInnerHTML(firstParagraph, htmlText); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + + private static Element findFirstElement(Element e, String name) { + String elementName = e.getName(); + if (elementName != null && elementName.equalsIgnoreCase(name)) { + return e; + } + for (int i = 0; i < e.getElementCount(); i++) { + Element result = findFirstElement(e.getElement(i), name); + if (result != null) { + return result; + } + } + return null; + } +} + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/swing/text/html/HTMLDocument/8058120/bug8058120.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +/* @test + * @bug 8058120 + * @summary Rendering / caret errors with HTMLDocument + * @author Dmitry Markov + * @run main bug8058120 + */ + +import sun.awt.SunToolkit; + +import javax.swing.*; +import javax.swing.text.Element; +import javax.swing.text.html.HTML; +import javax.swing.text.html.HTMLDocument; +import javax.swing.text.html.HTMLEditorKit; +import java.awt.*; + +public class bug8058120 { + private static SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit(); + private static HTMLDocument document = null; + private static final String text = "<html>" + + "<body>" + + "<p id=\"ab\">ab</p>" + + "</body>" + + "</html>";
+ private static final String textToInsert = "c"; + + public static void main(String[] args) { + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + createAndShowGUI(); + } + }); + + toolkit.realSync(); + + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + try { + document.insertAfterEnd(document.getElement("ab"), textToInsert); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + }); + + toolkit.realSync(); + + SwingUtilities.invokeLater(new Runnable() { + @Override + public void run() { + Element parent = document.getElement("ab").getParentElement(); + int count = parent.getElementCount(); + if (count != 2) { + throw new RuntimeException("Test Failed! Unexpected Element count = " + count); + } + Element insertedElement = parent.getElement(count - 1); + if (!HTML.Tag.IMPLIED.toString().equals(insertedElement.getName())) { + throw new RuntimeException("Test Failed! Inserted text is not wrapped by " + HTML.Tag.IMPLIED + " tag"); + } + } + }); + } + + private static void createAndShowGUI() { + try { + UIManager.setLookAndFeel("javax.swing.plaf.metal.MetalLookAndFeel"); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + JFrame frame = new JFrame("bug8058120"); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + + JEditorPane editorPane = new JEditorPane(); + editorPane.setContentType("text/html"); + editorPane.setEditorKit(new HTMLEditorKit()); + + document = (HTMLDocument) editorPane.getDocument(); + + editorPane.setText(text); + + frame.add(editorPane); + frame.setSize(200, 200); + frame.setVisible(true); + } +} + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/xml/jaxp/common/8032908/TestFunc.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import org.w3c.dom.Node; + +public class TestFunc { + + public static String test(Node node) { + String s = node.getTextContent(); + return s; + } + +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/xml/jaxp/common/8032908/XSLT.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8032908 + * @summary Test if Node.getTextContent() function correctly returns children + * content + * @compile TestFunc.java XSLT.java + * @run main/othervm XSLT + */ +import java.io.ByteArrayOutputStream; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +public class XSLT { + + static final String XMLTOTRANSFORM = "/in.xml"; + static final String XSLTRANSFORMER = "/test.xsl"; + static final String EXPECTEDRESULT = "ABCDEFG"; + + public static void main(String[] args) throws TransformerException { + ByteArrayOutputStream resStream = new ByteArrayOutputStream(); + TransformerFactory trf = TransformerFactory.newInstance(); + Transformer tr = trf.newTransformer(new StreamSource(System.getProperty("test.src", ".") + XSLTRANSFORMER)); + tr.transform(new StreamSource(System.getProperty("test.src", ".") + XMLTOTRANSFORM), new StreamResult(resStream)); + System.out.println("Transformation completed. Result:" + resStream.toString()); + if (!resStream.toString().equals(EXPECTEDRESULT)) { + throw new RuntimeException("Incorrect transformation result"); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/xml/jaxp/common/8032908/in.xml Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,2 @@ + +ABCDEFG --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/xml/jaxp/common/8032908/test.xsl Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,8 @@ + + + + + + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/javax/xml/jaxp/validation/8049514/FeaturePropagationTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +/** + * @test + * @bug 8049514 + * @summary verifies that feature set on the factory is propagated properly + * to the validator + * @run main/othervm FeaturePropagationTest + */ + + +import java.io.ByteArrayInputStream; +import java.io.InputStreamReader; +import javax.xml.XMLConstants; +import javax.xml.transform.stream.StreamSource; +import javax.xml.validation.*; + +/** + * JDK-8049514 + * + * FEATURE_SECURE_PROCESSING cannot be turned off on a validator through + * SchemaFactory + */ +public class FeaturePropagationTest { + + static String xsd = "<?xml version=\"1.0\"?>\n" + "<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n" + + " <xs:element name=\"root\" type=\"xs:string\"/>\n" + + "</xs:schema>\n"; + + public static void main(String[] args) throws Exception { + InputStreamReader reader = new InputStreamReader(new ByteArrayInputStream(xsd.getBytes())); + StreamSource xsdSource = new StreamSource(reader); + + SchemaFactory schemaFactory = SchemaFactory.newInstance("http://www.w3.org/2001/XMLSchema"); + schemaFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, false); + Schema schema = schemaFactory.newSchema(xsdSource); + + Validator validator = schema.newValidator(); + + if (validator.getFeature(XMLConstants.FEATURE_SECURE_PROCESSING)) { + throw new RuntimeException("Feature set on the factory is not inherited!"); + } + + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/jdk/net/Sockets/SupportedOptions.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8062744 + * @run main SupportedOptions + */ + +import java.net.*; +import java.io.IOException; +import jdk.net.*; + +public class SupportedOptions { + + public static void main(String[] args) throws Exception { + if (!Sockets.supportedOptions(ServerSocket.class) + .contains(StandardSocketOptions.IP_TOS)) { + throw new RuntimeException("Test failed"); + } + // Now set the option + ServerSocket ss = new ServerSocket(); + Sockets.setOption(ss, java.net.StandardSocketOptions.IP_TOS, 128); + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/lib/testlibrary/jdk/testlibrary/TimeLimitedRunner.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.testlibrary;
+
+import java.util.Objects;
+import java.util.concurrent.Callable;
+
+/**
+ * Auxiliary class to run a target with a given timeout.
+ */
+public class TimeLimitedRunner implements Callable<Void> {
+    private final long stoptime;
+    private final long timeout;
+    private final double factor;
+    private final Callable<Boolean> target;
+
+    /**
+     * @param timeout a timeout, in milliseconds; zero means no time limit
+     * @param factor  a multiplier used to estimate the next iteration time
+     * @param target  a target to run
+     * @throws NullPointerException if target is null
+     * @throws IllegalArgumentException if timeout is negative or
+     *                                  factor isn't positive
+     */
+    public TimeLimitedRunner(long timeout, double factor,
+            Callable<Boolean> target) {
+        Objects.requireNonNull(target, "target must not be null");
+        if (timeout < 0) {
+            throw new IllegalArgumentException("timeout[" + timeout + "] < 0");
+        }
+        if (factor <= 0d) {
+            throw new IllegalArgumentException("factor[" + factor + "] <= 0");
+        }
+        this.stoptime = System.currentTimeMillis() + timeout;
+        this.timeout = timeout;
+        this.factor = factor;
+        this.target = target;
+    }
+
+    /**
+     * Runs the target while it returns {@code true} and the timeout
+     * isn't exceeded.
+     */
+    @Override
+    public Void call() throws Exception {
+        long maxDuration = 0L;
+        long iterStart = System.currentTimeMillis();
+        if (timeout != 0 && iterStart > stoptime) {
+            return null;
+        }
+        while (target.call()) {
+            if (timeout != 0) {
+                long iterDuration = System.currentTimeMillis() - iterStart;
+                maxDuration = Math.max(maxDuration, iterDuration);
+                iterStart = System.currentTimeMillis();
+                if (iterStart + (maxDuration * factor) > stoptime) {
+                    System.out.println("Not enough time to continue execution. "
+                            + "Interrupted.");
+                    break;
+                }
+            }
+        }
+        return null;
+    }
+
+}
--- ./jdk/test/lib/testlibrary/jdk/testlibrary/Utils.java Mon Dec 08 12:29:42 2014 -0800
+++ ./jdk/test/lib/testlibrary/jdk/testlibrary/Utils.java Wed Feb 04 12:14:43 2015 -0800
@@ -35,6 +35,7 @@
 import java.util.Collections;
 import java.util.regex.Pattern;
 import java.util.regex.Matcher;
+import java.util.concurrent.TimeUnit;

 /**
  * Common library for various test helper functions.
@@ -67,6 +68,12 @@
         TIMEOUT_FACTOR = Double.parseDouble(toFactor);
     }

+    /**
+     * The value of the default JTREG test timeout (120 seconds)
+     * in milliseconds, converted to {@code long}.
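+     *
+     * <p>A hypothetical sketch of how a test might combine this constant
+     * with {@code adjustTimeout} and {@code TimeLimitedRunner}
+     * ({@code runOneIteration} is an assumed test method, not part of
+     * this library):
+     * <pre>{@code
+     * Callable<Boolean> work = () -> runOneIteration(); // false ends the loop
+     * new TimeLimitedRunner(Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT),
+     *                      0.9d, work).call();
+     * }</pre>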
+ */ + public static final long DEFAULT_TEST_TIMEOUT = TimeUnit.SECONDS.toMillis(120); + private Utils() { // Private constructor to prevent class instantiation } @@ -259,4 +266,14 @@ throw t; } } + + /** + * Adjusts the provided timeout value for the TIMEOUT_FACTOR + * @param tOut the timeout value to be adjusted + * @return The timeout value adjusted for the value of "test.timeout.factor" + * system property + */ + public static long adjustTimeout(long tOut) { + return Math.round(tOut * Utils.TIMEOUT_FACTOR); + } } --- ./jdk/test/lib/testlibrary/jsr292/com/oracle/testlibrary/jsr292/Helper.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/lib/testlibrary/jsr292/com/oracle/testlibrary/jsr292/Helper.java Wed Feb 04 12:14:43 2015 -0800 @@ -52,7 +52,7 @@ public static final long TEST_LIMIT; static { String str = System.getProperty("testLimit"); - TEST_LIMIT = str != null ? Long.parseUnsignedLong(str) : 2_000L; + TEST_LIMIT = str != null ? Long.parseUnsignedLong(str) : 2000L; System.out.printf("-DtestLimit=%d%n", TEST_LIMIT); } @@ -116,6 +116,48 @@ return size <= lag ? null : calledLog.get(size - lag - 1); } + public static List> randomClasses(Class[] classes, int size) { + List> result = new ArrayList<>(size); + for (int i = 0; i < size; ++i) { + result.add(classes[RNG.nextInt(classes.length)]); + } + return result; + } + + public static List> getParams(List> classes, + boolean isVararg, int argsCount) { + boolean unmodifiable = true; + List> result = classes.subList(0, + Math.min(argsCount, (MAX_ARITY / 2) - 1)); + int extra = 0; + if (argsCount >= MAX_ARITY / 2) { + result = new ArrayList<>(result); + unmodifiable = false; + extra = (int) result.stream().filter(Helper::isDoubleCost).count(); + int i = result.size(); + while (result.size() + extra < argsCount) { + Class aClass = classes.get(i); + if (Helper.isDoubleCost(aClass)) { + ++extra; + if (result.size() + extra >= argsCount) { + break; + } + } + result.add(aClass); + } + } + if (isVararg && result.size() > 0) { + if (unmodifiable) { + result = new ArrayList<>(result); + } + int last = result.size() - 1; + Class aClass = result.get(last); + aClass = Array.newInstance(aClass, 2).getClass(); + result.set(last, aClass); + } + return result; + } + public static MethodHandle addTrailingArgs(MethodHandle target, int nargs, List> classes) { int targetLen = target.type().parameterCount(); @@ -230,7 +272,7 @@ return randomArgs(params.toArray(new Class[params.size()])); } - private static Object castToWrapper(Object value, Class dst) { + public static Object castToWrapper(Object value, Class dst) { Object wrap = null; if (value instanceof Number) { wrap = castToWrapperOrNull(((Number) value).longValue(), dst); @@ -268,7 +310,7 @@ if (dst == byte.class || dst == Byte.class) { return (byte) (value); } - if (dst == boolean.class || dst == boolean.class) { + if (dst == boolean.class || dst == Boolean.class) { return ((value % 29) & 1) == 0; } return null; --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/sun/awt/datatransfer/DataFlavorComparatorTest1.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + @bug 8058473 + @summary "Comparison method violates its general contract" when using Clipboard + Ensure that DataTransferer.DataFlavorComparator conforms to Comparator contract + @author Anton Nashatyrev + @run main DataFlavorComparatorTest1 +*/ +import sun.awt.datatransfer.DataTransferer; + +import java.awt.datatransfer.DataFlavor; +import java.util.Comparator; + +public class DataFlavorComparatorTest1 { + + public static void main(String[] args) throws Exception { + String[] mimes = new String[] { + "text/plain", + "text/plain; charset=unicode", + "text/plain; charset=cp1251", + "text/plain; charset=unicode; class=java.io.InputStream", + "text/plain; charset=unicode; class=java.io.Serializable", + "text/plain; charset=unicode; class=java.lang.Object", + "text/plain; class=java.lang.String", + "text/plain; class=java.io.Reader", + "text/plain; class=java.lang.Object", + "text/html", + "text/html; charset=unicode", + "text/html; charset=cp1251", + "text/html; charset=unicode; class=java.io.InputStream", + "text/html; charset=unicode; class=java.io.Serializable", + "text/html; charset=unicode; class=java.lang.Object", + "text/html; class=java.lang.String", + "text/html; class=java.io.Reader", + "text/html; class=java.lang.Object", + "text/unknown", + "text/unknown; charset=unicode", + "text/unknown; charset=cp1251", + "text/unknown; charset=unicode; class=java.io.InputStream", + "text/unknown; charset=unicode; class=java.io.Serializable", + "text/unknown; charset=unicode; class=java.lang.Object", + "text/unknown; class=java.lang.String", + "text/unknown; class=java.io.Reader", + "text/unknown; class=java.lang.Object", + "application/unknown; class=java.io.InputStream", + "application/unknown; class=java.lang.Object", + "application/unknown", + "application/x-java-jvm-local-objectref; class=java.io.InputStream", + "application/x-java-jvm-local-objectref; class=java.lang.Object", + "application/x-java-jvm-local-objectref", + "unknown/flavor", + "unknown/flavor; class=java.io.InputStream", + "unknown/flavor; class=java.lang.Object", + }; + + DataFlavor[] flavors = new DataFlavor[mimes.length]; + for (int i = 0; i < flavors.length; i++) { + flavors[i] = new DataFlavor(mimes[i]); + } + + testComparator(new DataTransferer.DataFlavorComparator(true), flavors); + testComparator(new DataTransferer.DataFlavorComparator(false), flavors); + + } + + private static void testComparator(Comparator cmp, DataFlavor[] flavs) + throws ClassNotFoundException { + + for (DataFlavor x: flavs) { + for (DataFlavor y: flavs) { + if (Math.signum(cmp.compare(x,y)) != -Math.signum(cmp.compare(y,x))) { + throw new RuntimeException("Antisymmetry violated: " + x + ", " + y); + } + if (cmp.compare(x,y) == 0 && !x.equals(y)) { + throw new RuntimeException("Equals rule violated: " + x + ", " + 
y); + } + for (DataFlavor z: flavs) { + if (cmp.compare(x,y) == 0) { + if (Math.signum(cmp.compare(x, z)) != Math.signum(cmp.compare(y, z))) { + throw new RuntimeException("Transitivity (1) violated: " + x + ", " + y + ", " + z); + } + } else { + if (Math.signum(cmp.compare(x, y)) == Math.signum(cmp.compare(y, z))) { + if (Math.signum(cmp.compare(x, y)) != Math.signum(cmp.compare(x, z))) { + throw new RuntimeException("Transitivity (2) violated: " + x + ", " + y + ", " + z); + } + } + } + } + } + } + } +} --- ./jdk/test/sun/awt/dnd/8024061/bug8024061.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/awt/dnd/8024061/bug8024061.java Wed Feb 04 12:14:43 2015 -0800 @@ -277,9 +277,6 @@ try { Transferable t = dtde.getTransferable(); Object data = t.getTransferData(DropObjectFlavor); - if (data != null) { - throw new Exception("getTransferData returned non-null"); - } } catch (Exception e) { dragEnterException = e; e.printStackTrace(); --- ./jdk/test/sun/awt/image/bug8038000.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/awt/image/bug8038000.java Wed Feb 04 12:14:43 2015 -0800 @@ -23,11 +23,13 @@ /** * @test - * @bug 8038000 + * @bug 8038000 8047066 * * @summary Verifies that we could create different type of Rasters with height 1 * and strideline which exceeds raster width. * Also checks that a set of RasterOp work correctly with such kind of Rasters. + * For 8047066 verifies that ColorConvertOp could process + * Raster (ByteBuffer + SinglePixelPackedSampleModel) * * @run main bug8038000 */ --- ./jdk/test/sun/invoke/util/ValueConversionsTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/invoke/util/ValueConversionsTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -25,11 +25,11 @@ import sun.invoke.util.ValueConversions; import sun.invoke.util.Wrapper; +import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.lang.invoke.MethodHandle; import java.io.Serializable; import java.util.Arrays; -import java.util.Collections; import org.junit.Test; import static org.junit.Assert.*; @@ -37,24 +37,13 @@ * @summary unit tests for value-type conversion utilities * @compile -XDignore.symbol.file ValueConversionsTest.java * @run junit/othervm test.sun.invoke.util.ValueConversionsTest - * @run junit/othervm - * -DValueConversionsTest.MAX_ARITY=255 -DValueConversionsTest.START_ARITY=250 - * test.sun.invoke.util.ValueConversionsTest */ -// This might take a while and burn lots of metadata: -// @run junit/othervm -DValueConversionsTest.MAX_ARITY=255 -DValueConversionsTest.EXHAUSTIVE=true test.sun.invoke.util.ValueConversionsTest - /** * * @author jrose */ public class ValueConversionsTest { - private static final Class CLASS = ValueConversionsTest.class; - private static final int MAX_ARITY = Integer.getInteger(CLASS.getSimpleName()+".MAX_ARITY", 40); - private static final int START_ARITY = Integer.getInteger(CLASS.getSimpleName()+".START_ARITY", 0); - private static final boolean EXHAUSTIVE = Boolean.getBoolean(CLASS.getSimpleName()+".EXHAUSTIVE"); - @Test public void testUnbox() throws Throwable { testUnbox(false); @@ -66,9 +55,7 @@ } private void testUnbox(boolean doCast) throws Throwable { - //System.out.println("unbox"); for (Wrapper dst : Wrapper.values()) { - //System.out.println(dst); for (Wrapper src : Wrapper.values()) { testUnbox(doCast, dst, src); } @@ -78,6 +65,7 @@ private void testUnbox(boolean doCast, Wrapper dst, Wrapper src) throws Throwable { boolean expectThrow = !doCast && !dst.isConvertibleFrom(src); if (dst == Wrapper.OBJECT || src == 
Wrapper.OBJECT) return; // must have prims + if (dst == Wrapper.VOID || src == Wrapper.VOID ) return; // must have values if (dst == Wrapper.OBJECT) expectThrow = false; // everything (even VOID==null here) converts to OBJECT try { @@ -91,9 +79,9 @@ } MethodHandle unboxer; if (doCast) - unboxer = ValueConversions.unboxCast(dst.primitiveType()); + unboxer = ValueConversions.unboxCast(dst); else - unboxer = ValueConversions.unbox(dst.primitiveType()); + unboxer = ValueConversions.unboxWiden(dst); Object expResult = (box == null) ? dst.zero() : dst.wrap(box); Object result = null; switch (dst) { @@ -104,9 +92,7 @@ case CHAR: result = (char) unboxer.invokeExact(box); break; case BYTE: result = (byte) unboxer.invokeExact(box); break; case SHORT: result = (short) unboxer.invokeExact(box); break; - case OBJECT: result = (Object) unboxer.invokeExact(box); break; case BOOLEAN: result = (boolean) unboxer.invokeExact(box); break; - case VOID: result = null; unboxer.invokeExact(box); break; } if (expectThrow) { expResult = "(need an exception)"; @@ -123,25 +109,23 @@ @Test public void testBox() throws Throwable { - //System.out.println("box"); for (Wrapper w : Wrapper.values()) { - if (w == Wrapper.VOID) continue; // skip this; no unboxed form - //System.out.println(w); + if (w == Wrapper.VOID) continue; // skip this; no unboxed form + if (w == Wrapper.OBJECT) continue; // skip this; already unboxed for (int n = -5; n < 10; n++) { Object box = w.wrap(n); - MethodHandle boxer = ValueConversions.box(w.primitiveType()); + MethodHandle boxer = ValueConversions.boxExact(w); Object expResult = box; Object result = null; switch (w) { - case INT: result = boxer.invokeExact(/*int*/n); break; - case LONG: result = boxer.invokeExact((long)n); break; - case FLOAT: result = boxer.invokeExact((float)n); break; - case DOUBLE: result = boxer.invokeExact((double)n); break; - case CHAR: result = boxer.invokeExact((char)n); break; - case BYTE: result = boxer.invokeExact((byte)n); break; - case SHORT: result = boxer.invokeExact((short)n); break; - case OBJECT: result = boxer.invokeExact((Object)n); break; - case BOOLEAN: result = boxer.invokeExact((n & 1) != 0); break; + case INT: result = (Integer) boxer.invokeExact(/*int*/n); break; + case LONG: result = (Long) boxer.invokeExact((long)n); break; + case FLOAT: result = (Float) boxer.invokeExact((float)n); break; + case DOUBLE: result = (Double) boxer.invokeExact((double)n); break; + case CHAR: result = (Character) boxer.invokeExact((char)n); break; + case BYTE: result = (Byte) boxer.invokeExact((byte)n); break; + case SHORT: result = (Short) boxer.invokeExact((short)n); break; + case BOOLEAN: result = (Boolean) boxer.invokeExact((n & 1) != 0); break; } assertEquals("(dst,src,n,box)="+Arrays.asList(w,w,n,box), expResult, result); @@ -151,16 +135,14 @@ @Test public void testCast() throws Throwable { - //System.out.println("cast"); Class[] types = { Object.class, Serializable.class, String.class, Number.class, Integer.class }; Object[] objects = { new Object(), Boolean.FALSE, "hello", (Long)12L, (Integer)6 }; for (Class dst : types) { - MethodHandle caster = ValueConversions.cast(dst); - assertEquals(caster.type(), ValueConversions.identity().type()); + MethodHandle caster = ValueConversions.cast().bindTo(dst); + assertEquals(caster.type(), MethodHandles.identity(Object.class).type()); for (Object obj : objects) { Class src = obj.getClass(); boolean canCast = dst.isAssignableFrom(src); - //System.out.println("obj="+obj+" <: dst="+dst+(canCast ? 
" (OK)" : " (will fail)")); try { Object result = caster.invokeExact(obj); if (canCast) @@ -176,25 +158,12 @@ } @Test - public void testIdentity() throws Throwable { - //System.out.println("identity"); - MethodHandle id = ValueConversions.identity(); - Object expResult = "foo"; - Object result = id.invokeExact(expResult); - // compiler bug: ValueConversions.identity().invokeExact("bar"); - assertEquals(expResult, result); - } - - @Test public void testConvert() throws Throwable { - //System.out.println("convert"); for (long tval = 0, ctr = 0;;) { if (++ctr > 99999) throw new AssertionError("too many test values"); - // next test value: - //System.out.println(Long.toHexString(tval)); // prints 3776 test patterns + // prints 3776 test patterns (3776 = 8*59*8) tval = nextTestValue(tval); if (tval == 0) { - //System.out.println("test value count = "+ctr); // 3776 = 8*59*8 break; // repeat } } @@ -205,15 +174,12 @@ } } static void testConvert(Wrapper src, Wrapper dst, long tval) throws Throwable { - //System.out.println(src+" => "+dst); + if (dst == Wrapper.OBJECT || src == Wrapper.OBJECT) return; // must have prims + if (dst == Wrapper.VOID || src == Wrapper.VOID ) return; // must have values boolean testSingleCase = (tval != 0); final long tvalInit = tval; MethodHandle conv = ValueConversions.convertPrimitive(src, dst); - MethodType convType; - if (src == Wrapper.VOID) - convType = MethodType.methodType(dst.primitiveType() /* , void */); - else - convType = MethodType.methodType(dst.primitiveType(), src.primitiveType()); + MethodType convType = MethodType.methodType(dst.primitiveType(), src.primitiveType()); assertEquals(convType, conv.type()); MethodHandle converter = conv.asType(conv.type().changeReturnType(Object.class)); for (;;) { @@ -229,9 +195,7 @@ case CHAR: result = converter.invokeExact((char)n); break; case BYTE: result = converter.invokeExact((byte)n); break; case SHORT: result = converter.invokeExact((short)n); break; - case OBJECT: result = converter.invokeExact((Object)n); break; case BOOLEAN: result = converter.invokeExact((n & 1) != 0); break; - case VOID: result = converter.invokeExact(); break; default: throw new AssertionError(); } assertEquals("(src,dst,n,testValue)="+Arrays.asList(src,dst,"0x"+Long.toHexString(n),testValue), @@ -269,169 +233,4 @@ } return tweakSign(ux); } - - @Test - public void testVarargsArray() throws Throwable { - //System.out.println("varargsArray"); - final int MIN = START_ARITY; - final int MAX = MAX_ARITY-2; // 253+1 would cause parameter overflow with 'this' added - for (int nargs = MIN; nargs <= MAX; nargs = nextArgCount(nargs, 17, MAX)) { - MethodHandle target = ValueConversions.varargsArray(nargs); - Object[] args = new Object[nargs]; - for (int i = 0; i < nargs; i++) - args[i] = "#"+i; - Object res = target.invokeWithArguments(args); - assertArrayEquals(args, (Object[])res); - } - } - - @Test - public void testVarargsReferenceArray() throws Throwable { - //System.out.println("varargsReferenceArray"); - testTypedVarargsArray(Object[].class); - testTypedVarargsArray(String[].class); - testTypedVarargsArray(Number[].class); - } - - @Test - public void testVarargsPrimitiveArray() throws Throwable { - //System.out.println("varargsPrimitiveArray"); - testTypedVarargsArray(int[].class); - testTypedVarargsArray(long[].class); - testTypedVarargsArray(byte[].class); - testTypedVarargsArray(boolean[].class); - testTypedVarargsArray(short[].class); - testTypedVarargsArray(char[].class); - testTypedVarargsArray(float[].class); - 
testTypedVarargsArray(double[].class); - } - - private static int nextArgCount(int nargs, int density, int MAX) { - if (EXHAUSTIVE) return nargs + 1; - if (nargs >= MAX) return Integer.MAX_VALUE; - int BOT = 20, TOP = MAX-5; - if (density < 10) { BOT = 10; MAX = TOP-2; } - if (nargs <= BOT || nargs >= TOP) { - ++nargs; - } else { - int bump = Math.max(1, 100 / density); - nargs += bump; - if (nargs > TOP) nargs = TOP; - } - return nargs; - } - - private void testTypedVarargsArray(Class arrayType) throws Throwable { - //System.out.println(arrayType.getSimpleName()); - Class elemType = arrayType.getComponentType(); - int MIN = START_ARITY; - int MAX = MAX_ARITY-2; // 253+1 would cause parameter overflow with 'this' added - int density = 3; - if (elemType == int.class || elemType == long.class) density = 7; - if (elemType == long.class || elemType == double.class) { MAX /= 2; MIN /= 2; } - for (int nargs = MIN; nargs <= MAX; nargs = nextArgCount(nargs, density, MAX)) { - Object[] args = makeTestArray(elemType, nargs); - MethodHandle varargsArray = ValueConversions.varargsArray(arrayType, nargs); - MethodType vaType = varargsArray.type(); - assertEquals(arrayType, vaType.returnType()); - if (nargs != 0) { - assertEquals(elemType, vaType.parameterType(0)); - assertEquals(elemType, vaType.parameterType(vaType.parameterCount()-1)); - } - assertEquals(MethodType.methodType(arrayType, Collections.>nCopies(nargs, elemType)), - vaType); - Object res = varargsArray.invokeWithArguments(args); - String resString = toArrayString(res); - assertEquals(Arrays.toString(args), resString); - - MethodHandle spreader = varargsArray.asSpreader(arrayType, nargs); - MethodType stype = spreader.type(); - assert(stype == MethodType.methodType(arrayType, arrayType)); - if (nargs <= 5) { - // invoke target as a spreader also: - @SuppressWarnings("cast") - Object res2 = spreader.invokeWithArguments((Object)res); - String res2String = toArrayString(res2); - assertEquals(Arrays.toString(args), res2String); - // invoke the spreader on a generic Object[] array; check for error - try { - Object res3 = spreader.invokeWithArguments((Object)args); - String res3String = toArrayString(res3); - assertTrue(arrayType.getName(), arrayType.isAssignableFrom(Object[].class)); - assertEquals(Arrays.toString(args), res3String); - } catch (ClassCastException ex) { - assertFalse(arrayType.getName(), arrayType.isAssignableFrom(Object[].class)); - } - } - if (nargs == 0) { - // invoke spreader on null arglist - Object res3 = spreader.invokeWithArguments((Object)null); - String res3String = toArrayString(res3); - assertEquals(Arrays.toString(args), res3String); - } - } - } - - private static Object[] makeTestArray(Class elemType, int len) { - Wrapper elem = null; - if (elemType.isPrimitive()) - elem = Wrapper.forPrimitiveType(elemType); - else if (Wrapper.isWrapperType(elemType)) - elem = Wrapper.forWrapperType(elemType); - Object[] args = new Object[len]; - for (int i = 0; i < len; i++) { - Object arg = i * 100; - if (elem == null) { - if (elemType == String.class) - arg = "#"+arg; - arg = elemType.cast(arg); // just to make sure - } else { - switch (elem) { - case BOOLEAN: arg = (i % 3 == 0); break; - case CHAR: arg = 'a' + i; break; - case LONG: arg = (long)i * 1000_000_000; break; - case FLOAT: arg = (float)i / 100; break; - case DOUBLE: arg = (double)i / 1000_000; break; - } - arg = elem.cast(arg, elemType); - } - args[i] = arg; - } - //System.out.println(elemType.getName()+Arrays.toString(args)); - return args; - } - - private static 
String toArrayString(Object a) { - if (a == null) return "null"; - Class elemType = a.getClass().getComponentType(); - if (elemType == null) return a.toString(); - if (elemType.isPrimitive()) { - switch (Wrapper.forPrimitiveType(elemType)) { - case INT: return Arrays.toString((int[])a); - case BYTE: return Arrays.toString((byte[])a); - case BOOLEAN: return Arrays.toString((boolean[])a); - case SHORT: return Arrays.toString((short[])a); - case CHAR: return Arrays.toString((char[])a); - case FLOAT: return Arrays.toString((float[])a); - case LONG: return Arrays.toString((long[])a); - case DOUBLE: return Arrays.toString((double[])a); - } - } - return Arrays.toString((Object[])a); - } - - @Test - public void testVarargsList() throws Throwable { - //System.out.println("varargsList"); - final int MIN = START_ARITY; - final int MAX = MAX_ARITY-2; // 253+1 would cause parameter overflow with 'this' added - for (int nargs = MIN; nargs <= MAX; nargs = nextArgCount(nargs, 7, MAX)) { - MethodHandle target = ValueConversions.varargsList(nargs); - Object[] args = new Object[nargs]; - for (int i = 0; i < nargs; i++) - args[i] = "#"+i; - Object res = target.invokeWithArguments(args); - assertEquals(Arrays.asList(args), res); - } - } } --- ./jdk/test/sun/java2d/DirectX/OnScreenRenderingResizeTest/OnScreenRenderingResizeTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/java2d/DirectX/OnScreenRenderingResizeTest/OnScreenRenderingResizeTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,8 +89,13 @@ public void update(Graphics g) {} }; frame.setBackground(bgColor); + frame.setUndecorated(true); frame.pack(); - frame.setSize(FRAME_W, FRAME_H); + + GraphicsConfiguration gc = frame.getGraphicsConfiguration(); + Rectangle gcBounds = gc.getBounds(); + frame.setBounds(gcBounds.width / 4, gcBounds.height / 4, FRAME_W, FRAME_H); + frame.addWindowListener(new WindowAdapter() { public void windowClosing(WindowEvent e) { done = true; @@ -108,9 +113,8 @@ ex.printStackTrace(); } - GraphicsConfiguration gc = frame.getGraphicsConfiguration(); - int maxW = gc.getBounds().width /2; - int maxH = gc.getBounds().height/2; + int maxW = gcBounds.width /2; + int maxH = gcBounds.height/2; int minW = frame.getWidth(); int minH = frame.getHeight(); int incW = 10, incH = 10, cnt = 0; @@ -155,6 +159,7 @@ Insets in = frame.getInsets(); frame.getGraphics().drawImage(output, in.left, in.top, null); if (cnt == 90 && robot != null) { + robot.waitForIdle(); // area where we blitted to should be either white or green Point p = frame.getLocationOnScreen(); p.translate(in.left+10, in.top+10); @@ -172,7 +177,7 @@ frame.getWidth()-in.left-in.right, frame.getHeight()-in.top-in.bottom-5-IMAGE_H)); int accepted2[] = { Color.white.getRGB() }; - checkBI(bi, accepted1); + checkBI(bi, accepted2); } Thread.yield(); --- ./jdk/test/sun/jvmstat/monitor/MonitoredVm/CR6672135.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/jvmstat/monitor/MonitoredVm/CR6672135.java Wed Feb 04 12:14:43 2015 -0800 @@ -34,6 +34,7 @@ * @bug 6672135 * @summary setInterval() for local MonitoredHost and local MonitoredVm * @author Tomas Hurka + * @run main/othervm -XX:+UsePerfData CR6672135 */ public class CR6672135 { --- 
./jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.java Mon Dec 08 12:29:42 2014 -0800
+++ ./jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.java Wed Feb 04 12:14:43 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,228 +21,305 @@
  * questions.
  */

-import java.util.concurrent.CountDownLatch;
-import java.util.regex.*;
-import java.util.*;
+import java.io.File;
+import java.io.IOException;
 import java.net.URISyntaxException;
-import java.io.IOException;
-import sun.jvmstat.monitor.*;
-import sun.jvmstat.monitor.event.*;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Semaphore;

-public class MonitorVmStartTerminate {
+import jdk.testlibrary.OutputBuffer;
+import jdk.testlibrary.ProcessTools;
+import sun.jvmstat.monitor.MonitorException;
+import sun.jvmstat.monitor.MonitoredHost;
+import sun.jvmstat.monitor.MonitoredVm;
+import sun.jvmstat.monitor.MonitoredVmUtil;
+import sun.jvmstat.monitor.VmIdentifier;
+import sun.jvmstat.monitor.event.HostEvent;
+import sun.jvmstat.monitor.event.HostListener;
+import sun.jvmstat.monitor.event.VmStatusChangeEvent;

-    private static final int SLEEPERS = 10;
-    private static final int SLEEPTIME = 5000;      // sleep time for a sleeper
-    private static final int EXECINTERVAL = 3000;   // wait time between exec's
+/*

-    public static void main(String args[]) throws Exception {
+   The test starts ten Java processes, each with a unique id.

-        long now = System.currentTimeMillis();
+   Each process creates a file named after the id and then waits for
+   the test to remove the file, at which point the Java process exits.

-        String sleeperArgs = SLEEPTIME + " " + now;
-        String sleeperPattern = "Sleeper " + sleeperArgs + " \\d+$";
+   The processes are monitored by the test to make sure that notifications
+   are sent when they are started and terminated.
+
+   To avoid Java processes being left behind in case of an unexpected
+   failure, shutdown hooks are installed that remove the files when the test
+   exits. If the files are not removed, e.g. due to a JVM crash, the Java
+   processes will exit on their own after 1000 s.
+
+*/
+
+/*
+ * @test
+ * @bug 4990825
+ * @summary attach to external but local JVM processes
+ * @library /lib/testlibrary
+ * @build jdk.testlibrary.*
+ * @run main/othervm MonitorVmStartTerminate
+ */
+public final class MonitorVmStartTerminate {
+
+    private static final int PROCESS_COUNT = 10;
+
+    public static void main(String...
args) throws Exception { MonitoredHost host = MonitoredHost.getMonitoredHost("localhost"); - host.setInterval(200); + host.setInterval(1); // 1 ms - Matcher matcher = Pattern.compile(sleeperPattern).matcher(""); - SleeperListener listener = new SleeperListener(host, matcher, SLEEPERS); + String id = UUID.randomUUID().toString(); + + List javaProcesses = new ArrayList<>(); + for (int i = 0; i < PROCESS_COUNT; i++) { + javaProcesses.add(new JavaProcess(id + "_" + i)); + } + + Listener listener = new Listener(host, javaProcesses); host.addHostListener(listener); + for (JavaProcess javaProcess : javaProcesses) { + javaProcess.start(); + } - SleeperStarter ss = new SleeperStarter(SLEEPERS, EXECINTERVAL, - sleeperArgs); - ss.start(); + // Wait for all processes to start before terminating + // them, so pids are not reused within a poll interval. + System.out.println("Waiting for all processes to get started notification"); + listener.started.acquire(PROCESS_COUNT); - System.out.println("Waiting for " - + SLEEPERS + " sleepers to terminate"); - try { - ss.join(); - } catch (InterruptedException e) { - throw new Exception("Timed out waiting for sleepers"); + for (JavaProcess javaProcess : javaProcesses) { + javaProcess.terminate(); } - listener.waitForSleepersToStart(); - listener.waitForSleepersToTerminate(); + System.out.println("Waiting for all processes to get terminated notification"); + listener.terminated.acquire(PROCESS_COUNT); + + host.removeHostListener(listener); } - public static class SleeperListener implements HostListener { + private static final class Listener implements HostListener { + private final Semaphore started = new Semaphore(0); + private final Semaphore terminated = new Semaphore(0); + private final MonitoredHost host; + private final List processes; - private final List targets = new ArrayList<>(); - private final CountDownLatch terminateLatch; - private final CountDownLatch startLatch; - private final MonitoredHost host; - private final Matcher patternMatcher; - - public SleeperListener(MonitoredHost host, Matcher matcher, int count) { + public Listener(MonitoredHost host, List processes) { this.host = host; - this.patternMatcher = matcher; - this.terminateLatch = new CountDownLatch(count); - this.startLatch = new CountDownLatch(count); + this.processes = processes; + printStatus(); } - public void waitForSleepersToTerminate() throws InterruptedException { - terminateLatch.await(); + @Override + @SuppressWarnings("unchecked") + public void vmStatusChanged(VmStatusChangeEvent event) { + releaseStarted(event.getStarted()); + releaseTerminated(event.getTerminated()); + printStatus(); } - public void waitForSleepersToStart() throws InterruptedException { - startLatch.await(); + private void printStatus() { + System.out.printf("started=%d, terminated=%d\n", + started.availablePermits(), terminated.availablePermits()); } - private void printList(Set list, String msg) { - System.out.println(msg + ":"); - for (Integer lvmid : list) { - try { - VmIdentifier vmid = new VmIdentifier("//" + lvmid.intValue()); - MonitoredVm target = host.getMonitoredVm(vmid); + @Override + public void disconnected(HostEvent arg0) { + // ignore + } - StringMonitor cmdMonitor = - (StringMonitor)target.findByName("sun.rt.javaCommand"); - String cmd = cmdMonitor.stringValue(); + private void releaseStarted(Set ids) { + System.out.println("realeaseStarted(" + ids + ")"); + for (Integer id : ids) { + releaseStarted(id); + } + } - System.out.println("\t" + lvmid.intValue() + ": " - + "\"" + cmd + "\"" + 
": "); - } catch (URISyntaxException e) { - System.err.println("Unexpected URISyntaxException: " - + e.getMessage()); - } catch (MonitorException e) { - System.out.println("\t" + lvmid.intValue() - + ": error reading monitoring data: " - + " target possibly terminated?"); + private void releaseStarted(Integer id) { + for (JavaProcess jp : processes) { + if (hasMainArgs(id, jp.getMainArgsIdentifier())) { + // store id for terminated identification + jp.setId(id); + System.out.println("RELEASED (id=" + jp.getId() + ", args=" + jp.getMainArgsIdentifier() + ")"); + started.release(); + return; } } } - - private int addStarted(Set started) { - int found = 0; - for (Integer lvmid : started) { - try { - VmIdentifier vmid = new VmIdentifier("//" + lvmid.intValue()); - MonitoredVm target = host.getMonitoredVm(vmid); - - StringMonitor cmdMonitor = - (StringMonitor)target.findByName("sun.rt.javaCommand"); - String cmd = cmdMonitor.stringValue(); - - patternMatcher.reset(cmd); - System.out.print("Started: " + lvmid.intValue() - + ": " + "\"" + cmd + "\"" + ": "); - - if (patternMatcher.matches()) { - System.out.println("matches pattern - recorded"); - targets.add(lvmid); - found++; - } - else { - System.out.println("does not match pattern - ignored"); - } - } catch (URISyntaxException e) { - System.err.println("Unexpected URISyntaxException: " - + e.getMessage()); - } catch (MonitorException e) { - System.err.println("Unexpected MonitorException: " - + e.getMessage()); - } - } - return found; - } - - private int removeTerminated(Set terminated) { - int found = 0; - for (Integer lvmid : terminated) { - /* - * we don't attempt to attach to the target here as it's - * now dead and has no jvmstat share memory file. Just see - * if the process id is among those that we saved when we - * started the targets (note - duplicated allowed and somewhat - * expected on windows); - */ - System.out.print("Terminated: " + lvmid.intValue() + ": "); - if (targets.contains(lvmid)) { - System.out.println("matches pattern - termination recorded"); - targets.remove(lvmid); - found++; - } - else { - System.out.println("does not match pattern - ignored"); - } - } - return found; - } - - @SuppressWarnings("unchecked") - public void vmStatusChanged(VmStatusChangeEvent ev) { - printList(ev.getActive(), "Active"); - printList(ev.getStarted(), "Started"); - printList(ev.getTerminated(), "Terminated"); - - int recentlyStarted = addStarted(ev.getStarted()); - int recentlyTerminated = removeTerminated(ev.getTerminated()); - - for (int i = 0; i < recentlyTerminated; i++) { - terminateLatch.countDown(); - } - for (int i = 0; i < recentlyStarted; i++) { - startLatch.countDown(); + private void releaseTerminated(Set ids) { + System.out.println("releaseTerminated(" + ids + ")"); + for (Integer id : ids) { + releaseTerminated(id); } } - public void disconnected(HostEvent ev) { + private void releaseTerminated(Integer id) { + for (JavaProcess jp : processes) { + if (id.equals(jp.getId())) { + System.out.println("RELEASED (id=" + jp.getId() + ", args=" + jp.getMainArgsIdentifier() + ")"); + terminated.release(); + return; + } + } + } + + private boolean hasMainArgs(Integer id, String args) { + try { + VmIdentifier vmid = new VmIdentifier("//" + id.intValue()); + MonitoredVm target = host.getMonitoredVm(vmid); + String monitoredArgs = MonitoredVmUtil.mainArgs(target); + if (monitoredArgs != null && monitoredArgs.contains(args)) { + return true; + } + } catch (URISyntaxException | MonitorException e) { + // ok. 
process probably not running
+            }
+            return false;
+        }
+    }

-    public static class SleeperStarter extends Thread {
+    public final static class JavaProcess {

-        private final JavaProcess[] processes;
-        private final int execInterval;
-        private final String args;
+        private static final class ShutdownHook extends Thread {
+            private final JavaProcess javaProcess;

-        public SleeperStarter(int sleepers, int execInterval, String args) {
-            this.execInterval = execInterval;
-            this.args = args;
-            this.processes = new JavaProcess[sleepers];
+            public ShutdownHook(JavaProcess javaProcess) {
+                this.javaProcess = javaProcess;
+            }
+
+            public void run() {
+                javaProcess.terminate();
+            }
         }

-        private synchronized int active() {
-            int active = processes.length;
-            for(JavaProcess jp : processes) {
-                try {
-                    jp.exitValue();
-                    active--;
-                } catch (IllegalThreadStateException e) {
-                    // process hasn't exited yet
-                }
+        public static void main(String[] args) throws InterruptedException {
+            try {
+                Path path = Paths.get(args[0]);
+                createFile(path);
+                waitForRemoval(path);
+            } catch (Throwable t) {
+                t.printStackTrace();
+                System.exit(1);
             }
-            return active;
         }

-        public void run() {
-            System.out.println("Starting " + processes.length + " sleepers");
+        public Integer getId() {
+            return id;
+        }

-            String[] classpath = {
-                "-classpath",
-                System.getProperty("java.class.path")
-            };
+        public void setId(Integer id) {
+            this.id = id;
+        }

-            for (int i = 0; i < processes.length; i++) {
-                try {
-                    System.out.println("Starting Sleeper " + i);
-                    synchronized(this) {
-                        processes[i] = new JavaProcess("Sleeper", args + " " + i);
-                        processes[i].addOptions(classpath);
-                    }
-                    processes[i].start();
-                    Thread.sleep(execInterval);
-                } catch (InterruptedException ignore) {
-                } catch (IOException e) {
-                    System.err.println(
-                        "IOException trying to start Sleeper " + i + ": "
-                        + e.getMessage());
-                }
-            }
+        private static void createFile(Path path) throws IOException {
+            Files.write(path, new byte[0], StandardOpenOption.CREATE);
+        }

-            // spin waiting for the processes to terminate
-            while (active() > 0) ;
+        private static void waitForRemoval(Path path) {
+            String timeoutFactorText = System.getProperty("test.timeout.factor", "1.0");
+            double timeoutFactor = Double.parseDouble(timeoutFactorText);
+            long timeoutNanos = 1000_000_000L * (long) (1000 * timeoutFactor);
+            long start = System.nanoTime();
+            while (true) {
+                long now = System.nanoTime();
+                long waited = now - start;
+                System.out.println("Waiting for " + path + " to be removed, " + waited + " ns");
+                if (!Files.exists(path)) {
+                    return;
+                }
+                if (waited > timeoutNanos) {
+                    System.out.println("Start: " + start);
+                    System.out.println("Now: " + now);
+                    System.out.println("Process timed out after " + waited + " ns. Abort.");
+                    System.exit(1);
+                }
+                takeNap();
+            }
+        }
+
+        private static void takeNap() {
+            try {
+                Thread.sleep(100);
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+
+        private final String mainArgsIdentifier;
+        private final ShutdownHook shutdownHook;
+        private volatile Integer id;
+
+        public JavaProcess(String mainArgsIdentifier) {
+            this.mainArgsIdentifier = mainArgsIdentifier;
+            this.shutdownHook = new ShutdownHook(this);
+        }
+
+        /**
+         * Starts a Java process asynchronously.
+         *
+         * The process runs until {@link #terminate()} is called. If the test
+         * exits unexpectedly, the process will be cleaned up by a shutdown hook.
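+         *
+         * <p>For illustration, a rough sketch of the round trip the enclosing
+         * test performs (all names are defined in this file):
+         * <pre>{@code
+         * JavaProcess jp = new JavaProcess("some-unique-id");
+         * jp.start();     // spawns the process asynchronously
+         * // ... await the started notification via the host listener ...
+         * jp.terminate(); // removes the id file; the process then exits
+         * }</pre>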
+ * + * @throws Exception + */ + public void start() throws Exception { + Runtime.getRuntime().addShutdownHook(shutdownHook); + System.out.println("Starting " + getMainArgsIdentifier()); + + Runnable r = new Runnable() { + @Override + public void run() { + try { + executeJava(); + } catch (Throwable t) { + t.printStackTrace(); + } + } + }; + new Thread(r).start(); + } + + public void terminate() { + try { + System.out.println("Terminating " + mainArgsIdentifier); + // File must be created before proceeding, + // otherwise Java process may loop forever + // waiting for file to be removed. + Path path = Paths.get(mainArgsIdentifier); + while (!Files.exists(path)) { + takeNap(); + } + Files.delete(path); + } catch (IOException e) { + e.printStackTrace(); + } + Runtime.getRuntime().removeShutdownHook(shutdownHook); + } + + private void executeJava() throws Exception, IOException { + String className = JavaProcess.class.getName(); + String classPath = System.getProperty("test.classes"); + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-Dtest.timeout.factor=" + System.getProperty("test.timeout.factor", "1.0"), + "-cp", classPath, className, mainArgsIdentifier); + OutputBuffer ob = ProcessTools.getOutput(pb.start()); + System.out.println("Java Process " + getMainArgsIdentifier() + " stderr:" + + ob.getStderr()); + System.err.println("Java Process " + getMainArgsIdentifier() + " stdout:" + + ob.getStdout()); + } + + public String getMainArgsIdentifier() { + return mainArgsIdentifier; } } } - --- ./jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.sh Mon Dec 08 12:29:42 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,43 +0,0 @@ -# -# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - -# -# @test -# @bug 4990825 -# @summary attach to external but local JVM processes -# @library ../../testlibrary -# @build Sleeper -# @build JavaProcess -# @build MonitorVmStartTerminate -# @run shell MonitorVmStartTerminate.sh -# - -. ${TESTSRC-.}/../../testlibrary/utils.sh - -setup -verify_os - -JAVA="${TESTJAVA}/bin/java" -CP=${TESTJAVA}${FS}lib${FS}tools.jar${PS}${TESTCLASSES}${PS}${TESTCLASSES}${FS}..${FS}..${FS}testlibrary - -${JAVA} ${TESTVMOPTS} -classpath ${CP} MonitorVmStartTerminate --- ./jdk/test/sun/jvmstat/testlibrary/JavaProcess.java Mon Dec 08 12:29:42 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,151 +0,0 @@ -/* - * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/** - * - */ - -import java.io.*; - -public class JavaProcess { - - protected Process process = null; - - private String classname; - private StringBuilder classArgs; - private StringBuilder javaOptions; - - private static String java = System.getProperty("java.home") - + File.separator + "bin" - + File.separator + "java"; - - public JavaProcess(String classname) { - this(classname, "", ""); - } - - public JavaProcess(String classname, String classArgs) { - this(classname, "", classArgs); - } - - public JavaProcess(String classname, String javaOptions, String classArgs) { - this.classname = classname; - this.javaOptions = new StringBuilder(javaOptions); - this.classArgs = new StringBuilder(classArgs); - } - - /** - * add java options to the java command - */ - public void addOptions(String[] opts) { - if (javaOptions != null && javaOptions.length() > 0) { - javaOptions.append(" "); - } - - for (int i = 0; i < opts.length; i++) { - if (i != 0) { - javaOptions.append(" "); - } - javaOptions.append(opts[i]); - } - } - - /** - * add arguments to the class arguments - */ - public void addArguments(String[] args) { - if (classArgs != null && classArgs.length() > 0) { - classArgs.append(" "); - } - - for (int i = 0; i < args.length; i++) { - if (i != 0) { - classArgs.append(" "); - } - classArgs.append(args[i]); - } - } - - /** - * start the java process - */ - public void start() throws IOException { - if (process != null) { - return; - } - - String javaCommand = java + " " + javaOptions + " " - + classname + " " + classArgs; - - System.out.println("exec'ing: " + javaCommand); - - process = Runtime.getRuntime().exec(javaCommand); - } - - /** - * destroy the java process - */ - public void destroy() { - if (process != null) { - process.destroy(); - } - process = null; - } - - public int exitValue() { - if (process != null) { - return process.exitValue(); - } - throw new RuntimeException("exitValue called with process == null"); - } - - public InputStream getErrorStream() { - if (process != null) { - return process.getErrorStream(); - } - throw new RuntimeException( - "getErrorStream() called with process == null"); - } - - public InputStream getInputStream() { - if (process != null) { - return process.getInputStream(); - } - throw new RuntimeException( - "getInputStream() called with process == null"); - } - - public OutputStream getOutputStream() { - if (process != null) { - return process.getOutputStream(); - } - throw new RuntimeException( - "getOutputStream() called with process == null"); - } - - public int waitFor() throws InterruptedException { - if (process != null) { - return 
process.waitFor(); - } - throw new RuntimeException("waitFor() called with process == null"); - } -} --- ./jdk/test/sun/management/jmxremote/bootstrap/JvmstatCountersTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/management/jmxremote/bootstrap/JvmstatCountersTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -143,9 +143,12 @@ String vmid = name.substring(0, name.indexOf("@")); System.out.println("vmid = " + vmid); VirtualMachine vm = VirtualMachine.attach(vmid); - String agent = vm.getSystemProperties().getProperty("java.home") + - File.separator + "lib" + File.separator + "management-agent.jar"; - vm.loadAgent(agent, "com.sun.management.jmxremote.port=0,com.sun.management.jmxremote.authenticate=false,com.sun.management.jmxremote.ssl=false"); + Properties p = new Properties(); + p.put("com.sun.management.jmxremote.port", "0"); + p.put("com.sun.management.jmxremote.authenticate", "false"); + p.put("com.sun.management.jmxremote.ssl", "false"); + vm.startManagementAgent(p); + vm.startLocalManagementAgent(); vm.detach(); String localAddress2 = ConnectorAddressLink.importFrom(0); if (localAddress2 == null) { --- ./jdk/test/sun/management/jmxremote/bootstrap/LocalManagementTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/management/jmxremote/bootstrap/LocalManagementTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,16 +79,6 @@ return doTest("1", "-Dcom.sun.management.jmxremote"); } - private static boolean test2() throws Exception { - Path agentPath = findAgent(); - if (agentPath != null) { - String agent = agentPath.toString(); - return doTest("2", "-javaagent:" + agent); - } else { - return false; - } - } - /** * no args (blank) - manager should attach and start agent */ @@ -97,68 +87,12 @@ } /** - * sanity check arguments to management-agent.jar - */ - private static boolean test4() throws Exception { - Path agentPath = findAgent(); - if (agentPath != null) { - ProcessBuilder builder = ProcessTools.createJavaProcessBuilder( - "-javaagent:" + agentPath.toString() + - "=com.sun.management.jmxremote.port=7775," + - "com.sun.management.jmxremote.authenticate=false," + - "com.sun.management.jmxremote.ssl=false", - "-cp", - TEST_CLASSPATH, - "TestApplication", - "-exit" - ); - - Process prc = null; - try { - prc = ProcessTools.startProcess( - "TestApplication", - builder - ); - int exitCode = prc.waitFor(); - return exitCode == 0; - } finally { - if (prc != null) { - prc.destroy(); - prc.waitFor(); - } - } - } - return false; - } - - /** * use DNS-only name service */ private static boolean test5() throws Exception { return doTest("5", "-Dsun.net.spi.namservice.provider.1=\"dns,sun\""); } - private static Path findAgent() { - FileSystem FS = FileSystems.getDefault(); - Path agentPath = FS.getPath( - TEST_JDK, "jre", "lib", "management-agent.jar" - ); - if (!isFileOk(agentPath)) { - agentPath = FS.getPath( - TEST_JDK, "lib", "management-agent.jar" - ); - } - if (!isFileOk(agentPath)) { - System.err.println("Can not locate management-agent.jar"); - return null; - } - return agentPath; - } - - private static boolean isFileOk(Path path) { - return Files.isRegularFile(path) && Files.isReadable(path); - } - private static boolean doTest(String testId, String arg) throws Exception { List args = new ArrayList<>(); args.add("-cp"); --- ./jdk/test/sun/management/jmxremote/bootstrap/TestManager.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/management/jmxremote/bootstrap/TestManager.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,6 @@ import static java.lang.management.ManagementFactory.*; import java.net.Socket; import java.net.InetSocketAddress; -import java.io.File; import java.io.IOException; // Sun specific @@ -55,28 +54,8 @@ * Starts the management agent in the target VM */ private static void startManagementAgent(String pid) throws IOException { - /* - * JAR file normally in ${java.home}/jre/lib but may be in ${java.home}/lib - * with development/non-images builds - */ - String home = System.getProperty("java.home"); - String agent = home + File.separator + "jre" + File.separator + "lib" - + File.separator + "management-agent.jar"; - File f = new File(agent); - if (!f.exists()) { - agent = home + File.separator + "lib" + File.separator + - "management-agent.jar"; - f = new File(agent); - if (!f.exists()) { - throw new RuntimeException("management-agent.jar missing"); - } - } - agent = f.getCanonicalPath(); - - System.out.println("Loading " + agent + " into target VM ..."); - try { - VirtualMachine.attach(pid).loadAgent(agent); + VirtualMachine.attach(pid).startLocalManagementAgent(); } catch (Exception x) { throw new IOException(x.getMessage()); } @@ -122,8 +101,7 @@ if (agentPropLocalConnectorAddress == null && jvmstatLocalConnectorAddress == null) { - // No JMX Connector address so attach to VM, and load - // management-agent.jar + // No JMX Connector address so attach to VM, and start local agent startManagementAgent(pid); agentPropLocalConnectorAddress = (String) vm.getAgentProperties().get(LOCAL_CONNECTOR_ADDRESS_PROP); --- ./jdk/test/sun/management/jmxremote/startstop/JMXStartStopTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/management/jmxremote/startstop/JMXStartStopTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,16 +31,13 @@ import java.rmi.registry.Registry; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import javax.management.*; @@ -60,7 +57,6 @@ * JCMD achieves the desired results */ public class JMXStartStopTest { - private static final String TEST_JDK = System.getProperty("test.jdk"); private static final String TEST_SRC = System.getProperty("test.src"); private static final boolean verbose = false; @@ -76,8 +72,8 @@ QueryExp query) throws Exception { - Set names = server.queryNames(pattern,query); - for (Iterator i=names.iterator(); i.hasNext(); ) { + Set names = server.queryNames(pattern,query); + for (Iterator i = names.iterator(); i.hasNext(); ) { ObjectName name = (ObjectName)i.next(); MBeanInfo info = server.getMBeanInfo(name); dbg_print("Got MBean: " + name); @@ -87,7 +83,7 @@ continue; for (MBeanAttributeInfo attr : attrs) { if (attr.isReadable()) { - Object o = server.getAttribute(name, attr.getName()); + server.getAttribute(name, attr.getName()); } } } @@ -108,9 +104,8 @@ } JMXServiceURL url = new JMXServiceURL(jmxUrlStr); - Map m = new HashMap(); - JMXConnector c = JMXConnectorFactory.connect(url,m); + JMXConnector c = JMXConnectorFactory.connect(url, null); MBeanServerConnection conn = c.getMBeanServerConnection(); ObjectName pattern = new ObjectName("java.lang:type=Memory,*"); @@ -180,9 +175,8 @@ port); JMXServiceURL url = new JMXServiceURL(jmxUrlStr); - Map m = new HashMap(); - JMXConnector c = JMXConnectorFactory.connect(url,m); + JMXConnector c = JMXConnectorFactory.connect(url, null); MBeanServerConnection conn = c.getMBeanServerConnection(); ObjectName pattern = new ObjectName("java.lang:type=Memory,*"); @@ -273,25 +267,6 @@ } } - /** - * Retrieves the PID of the test application using JCMD - * @return The PID of the test application - * @throws InterruptedException - * @throws IOException - */ - private static String getPID() throws InterruptedException, IOException { - final AtomicReference pid = new AtomicReference<>(); - jcmd( - null, - line -> { - if (line.endsWith("JMXStartStopDoSomething")) { - pid.set(line.split(" ")[0]); - } - } - ); - return pid.get(); - } - private static class Something { private Process p; private final ProcessBuilder pb; @@ -434,7 +409,7 @@ private static final int port1 = 50234; private static final int port2 = 50235; - private static void test_01() throws Exception { + static void test_01() throws Exception { // Run an app with JMX enabled stop it and // restart on other port @@ -459,7 +434,7 @@ } } - private static void test_02() throws Exception { + static void test_02() throws Exception { // Run an app without JMX enabled // start JMX by jcmd @@ -478,7 +453,7 @@ } } - private static void test_03() throws Exception { + static void test_03() throws Exception { // Run an app without JMX enabled // start JMX by jcmd on one port than on other one @@ -507,7 +482,7 @@ } } - private static void test_04() throws Exception { + static void test_04() throws Exception { // Run an app without JMX enabled // start JMX by jcmd on one port, specify rmi port explicitly @@ -528,7 +503,7 @@ } } - private 
static void test_05() throws Exception { + static void test_05() throws Exception { // Run an app without JMX enabled, it will enable local server // but should leave remote server disabled @@ -545,7 +520,7 @@ } } - private static void test_06() throws Exception { + static void test_06() throws Exception { // Run an app without JMX enabled // start JMX by jcmd on one port, specify rmi port explicitly // attempt to start it again @@ -569,7 +544,7 @@ final boolean[] checks = new boolean[3]; jcmd( line -> { - if (line.equals("java.lang.RuntimeException: Invalid agent state")) { + if (line.contains("java.lang.RuntimeException: Invalid agent state")) { checks[0] = true; } }, @@ -580,7 +555,7 @@ jcmd( line -> { - if (line.equals("java.lang.RuntimeException: Invalid agent state")) { + if (line.contains("java.lang.RuntimeException: Invalid agent state")) { checks[1] = true; } }, @@ -648,7 +623,7 @@ } } - private static void test_08() throws Exception { + static void test_08() throws Exception { // Run an app with JMX enabled and with some properties set // in command line. // stop JMX agent and then start it again with different property values @@ -682,7 +657,7 @@ } } - private static void test_09() throws Exception { + static void test_09() throws Exception { // Run an app with JMX enabled and with some properties set // in command line. // stop JMX agent and then start it again with different property values @@ -718,7 +693,7 @@ } } - private static void test_10() throws Exception { + static void test_10() throws Exception { // Run an app with JMX enabled and with some properties set // in command line. // stop JMX agent and then start it again with different property values @@ -754,7 +729,7 @@ } } - private static void test_11() throws Exception { + static void test_11() throws Exception { // Run an app with JMX enabled // stop remote agent // make sure local agent is not affected @@ -775,7 +750,7 @@ } } - private static void test_12() throws Exception { + static void test_12() throws Exception { // Run an app with JMX disabled // start local agent only @@ -793,28 +768,4 @@ s.stop(); } } - - private static void test_13() throws Exception { - // Run an app with -javaagent make sure it works as expected - - // system properties are ignored - - System.out.println("**** Test thirteen ****"); - - String agent = TEST_JDK + "/jre/lib/management-agent.jar"; - if (!new File(agent).exists()) { - agent = TEST_JDK + "/lib/management-agent.jar"; - } - - Something s = doSomething("test_14", - "-javaagent:" + agent + "=com.sun.management.jmxremote.port=" + - port1 + ",com.sun.management.jmxremote.authenticate=false", - "-Dcom.sun.management.jmxremote.ssl=false" - ); - - try { - testNoConnect(port1); - } finally { - s.stop(); - } - } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/sun/misc/URLClassPath/EnableLookupCache.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
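The equals-to-contains switch in the jcmd checks above is the usual fix when matching tool output whose lines can carry extra text; jcmd, for example, prefixes its output with the target pid. A tiny illustration with a made-up sample line (the "12345: " prefix is hypothetical):

public class ContainsVsEqualsSketch {
    public static void main(String[] args) {
        // Illustrative jcmd-style output line; the pid prefix is invented.
        String line = "12345: java.lang.RuntimeException: Invalid agent state";
        String expected = "java.lang.RuntimeException: Invalid agent state";
        System.out.println(line.equals(expected));   // false: the prefix defeats an exact match
        System.out.println(line.contains(expected)); // true: substring matching tolerates it
    }
}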
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + * @bug 8061651 + * @summary -Dsun.cds.enableSharedLookupCache specified on the command-line + * should have no effect. + * @run main/othervm -Dsun.cds.enableSharedLookupCache=true -Xshare:off -Dfoo.foo.bar=xyz EnableLookupCache + */ + +public class EnableLookupCache { + public static void main(String[] args) throws Exception { + // If JVM is started with -Xshare:off, the sun.cds.enableSharedLookupCache + // should never be true, even if it has been explicitly set in the + // command-line. + String prop = "sun.cds.enableSharedLookupCache"; + String value = System.getProperty(prop); + System.out.println("System.getProperty(\"" + prop + "\") = \"" + value+ "\""); + + if ("true".equals(value)) { + System.out.println("Test FAILED: system property " + prop + + " is \"true\" (unexpected)"); + throw new RuntimeException(prop + " should not be " + value); + } + + // Make sure the -D... arguments in the @run tag are indeed used. + prop = "foo.foo.bar"; + value = System.getProperty(prop); + System.out.println("System.getProperty(\"" + prop + "\") = \"" + value+ "\""); + if (!"xyz".equals(value)) { + System.out.println("Test FAILED: system property " + prop + + " should be \"xyz\" -- is JTREG set up properly?"); + throw new RuntimeException(prop + " should not be " + value); + } + + + // We should be able to load the other classes without issue. + A.test(); + B.test(); + System.out.println("Test PASSED"); + } + + static class A {static void test() {}} + static class B {static void test() {}} +} + --- ./jdk/test/sun/net/ftp/FtpURL.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/net/ftp/FtpURL.java Wed Feb 04 12:14:43 2015 -0800 @@ -483,7 +483,7 @@ // Now let's check the URL handler - url = new URL("ftp://user2:@localhost:" + port + "/%2Fusr/bin;type=d"); + url = new URL("ftp://user2@localhost:" + port + "/%2Fusr/bin;type=d"); con = url.openConnection(); in = new BufferedReader(new InputStreamReader(con.getInputStream())); do { --- ./jdk/test/sun/net/www/http/HttpClient/StreamingRetry.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/net/www/http/HttpClient/StreamingRetry.java Wed Feb 04 12:14:43 2015 -0800 @@ -23,8 +23,8 @@ /* * @test - * @bug 6672144 - * @summary HttpURLConnection.getInputStream sends POST request after failed chunked send + * @bug 6672144 8050983 + * @summary Do not retry failed request with a streaming body. 
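The StreamingRetry summary above states the contract under test: once a request body is sent in streaming mode (chunked here), HttpURLConnection cannot buffer and replay it, so a failed exchange must surface as an IOException rather than a silent second request. A minimal client-side sketch (the URL is a placeholder; the IOException is expected when the server drops the connection):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class StreamingNoRetrySketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/"); // hypothetical endpoint
        HttpURLConnection uc = (HttpURLConnection) url.openConnection();
        uc.setDoOutput(true);
        uc.setRequestMethod("PUT");          // the test also covers POST and the default
        uc.setChunkedStreamingMode(4096);    // body becomes non-replayable
        try (OutputStream os = uc.getOutputStream()) {
            os.write("Hello there".getBytes());
        }
        uc.getInputStream().close();         // should fail with IOException, never retry
    }
}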
*/ import java.net.HttpURLConnection; @@ -33,31 +33,41 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import static java.lang.System.out; public class StreamingRetry implements Runnable { static final int ACCEPT_TIMEOUT = 20 * 1000; // 20 seconds - ServerSocket ss; + volatile ServerSocket ss; - public static void main(String[] args) throws IOException { + public static void main(String[] args) throws Exception { (new StreamingRetry()).instanceMain(); } - void instanceMain() throws IOException { - test(); + void instanceMain() throws Exception { + out.println("Test with default method"); + test(null); + out.println("Test with POST method"); + test("POST"); + out.println("Test with PUT method"); + test("PUT"); + if (failed > 0) throw new RuntimeException("Some tests failed"); } - void test() throws IOException { + void test(String method) throws Exception { ss = new ServerSocket(0); ss.setSoTimeout(ACCEPT_TIMEOUT); int port = ss.getLocalPort(); - (new Thread(this)).start(); + Thread otherThread = new Thread(this); + otherThread.start(); try { URL url = new URL("http://localhost:" + port + "/"); HttpURLConnection uc = (HttpURLConnection) url.openConnection(); uc.setDoOutput(true); + if (method != null) + uc.setRequestMethod(method); uc.setChunkedStreamingMode(4096); OutputStream os = uc.getOutputStream(); os.write("Hello there".getBytes()); @@ -68,6 +78,7 @@ //expected.printStackTrace(); } finally { ss.close(); + otherThread.join(); } } @@ -79,7 +90,7 @@ ss.close(); fail("The server shouldn't accept a second connection"); } catch (IOException e) { - //OK, the clien will close the server socket if successfull + //OK, the client will close the server socket if successful } } --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/sun/security/jgss/GssMemoryIssues.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028780 + * @summary JDK KRB5 module throws OutOfMemoryError when CCache is corrupt + * @run main/othervm -Xmx8m GssMemoryIssues + */ + +import org.ietf.jgss.GSSException; +import org.ietf.jgss.GSSManager; +import org.ietf.jgss.GSSName; + +public class GssMemoryIssues { + + public static void main(String[] argv) throws Exception { + GSSManager man = GSSManager.getInstance(); + String s = "me@REALM"; + GSSName name = man.createName(s, GSSName.NT_USER_NAME); + byte[] exported = name.export(); + // Offset of the length of the mech name. 
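The lenOffset arithmetic in the GssMemoryIssues test below follows the RFC 2743 exported-name layout: a 2-byte token id (0x04 0x01), a 2-byte mech OID length, the OID's DER encoding, then a 4-byte big-endian name length directly before the name bytes. A self-contained sketch of the same computation on a hand-built token (the 1.2.3.4 OID stands in for a real mechanism):

import java.io.ByteArrayOutputStream;

public class ExportedNameOffsetSketch {
    public static void main(String[] args) {
        byte[] oidDer = { 0x06, 0x03, 0x2A, 0x03, 0x04 }; // DER of OID 1.2.3.4
        byte[] name = "me@REALM".getBytes();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(0x04); out.write(0x01);                 // TOK_ID
        out.write(oidDer.length >> 8);                    // MECH_OID_LEN, 2 bytes
        out.write(oidDer.length & 0xff);
        out.write(oidDer, 0, oidDer.length);              // MECH_OID
        int len = name.length;                            // NAME_LEN, 4 bytes big endian
        out.write(len >>> 24); out.write(len >>> 16); out.write(len >>> 8); out.write(len);
        out.write(name, 0, name.length);                  // NAME
        byte[] exported = out.toByteArray();
        // Same expression as in the test: the 4 length bytes sit just before the name.
        int lenOffset = exported.length - name.length - 4;
        System.out.println(lenOffset == 2 + 2 + oidDer.length); // true
    }
}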
Length in big endian + int lenOffset = exported.length - s.length() - 4; + // Make it huge + exported[lenOffset] = 0x7f; + try { + man.createName(exported, GSSName.NT_EXPORT_NAME); + } catch (GSSException gsse) { + System.out.println(gsse); + } + } +} --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/sun/security/jgss/spnego/NotPreferredMech.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8048194 + * @run main/othervm NotPreferredMech + * @summary GSSContext.acceptSecContext fails when a supported mech is not initiator preferred + */ + +import org.ietf.jgss.*; +import sun.security.jgss.*; +import sun.security.jgss.spnego.NegTokenInit; +import sun.security.jgss.spnego.NegTokenTarg; +import sun.security.util.BitArray; +import sun.security.util.DerOutputStream; +import sun.security.util.DerValue; +import sun.security.util.ObjectIdentifier; + +import java.io.ByteArrayOutputStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; + +public class NotPreferredMech { + + public static void main(String[] argv) throws Exception { + + // Generates a NegTokenInit mechTypes field, with an + // unsupported mech as the preferred. 
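For reference, the mechTypes SEQUENCE assembled below encodes to the following DER bytes when 1.2.3.4 precedes krb5 (1.2.840.113554.1.2.2); the bytes are worked out by hand from the OID encoding rules, not emitted by the patch:

public class MechTypeListBytes {
    // SEQUENCE (0x30), content length 0x10, holding two OBJECT IDENTIFIERs (0x06)
    static final byte[] MECH_TYPE_LIST = {
        0x30, 0x10,
        0x06, 0x03, 0x2A, 0x03, 0x04,            // 1.2.3.4: unsupported, listed first
        0x06, 0x09, 0x2A, (byte) 0x86, 0x48,     // 1.2.840.113554.1.2.2: krb5
        (byte) 0x86, (byte) 0xF7, 0x12, 0x01, 0x02, 0x02
    };
}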
+ DerOutputStream mech = new DerOutputStream(); + mech.write(new Oid("1.2.3.4").getDER()); + mech.write(GSSUtil.GSS_KRB5_MECH_OID.getDER()); + DerOutputStream mechTypeList = new DerOutputStream(); + mechTypeList.write(DerValue.tag_Sequence, mech); + + // Generates a NegTokenInit mechToken field for 1.2.3.4 mech + GSSHeader h1 = new GSSHeader(new ObjectIdentifier("1.2.3.4"), 1); + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + h1.encode(bout); + bout.write(new byte[1]); + + // Generates the NegTokenInit token + Constructor ctor = NegTokenInit.class.getDeclaredConstructor( + byte[].class, BitArray.class, byte[].class, byte[].class); + ctor.setAccessible(true); + NegTokenInit initToken = ctor.newInstance( + mechTypeList.toByteArray(), + new BitArray(0), + bout.toByteArray(), + null); + Method m = Class.forName("sun.security.jgss.spnego.SpNegoToken") + .getDeclaredMethod("getEncoded"); + m.setAccessible(true); + byte[] spnegoToken = (byte[])m.invoke(initToken); + + // and wraps it into a GSSToken + GSSHeader h = new GSSHeader( + new ObjectIdentifier(GSSUtil.GSS_SPNEGO_MECH_OID.toString()), + spnegoToken.length); + bout = new ByteArrayOutputStream(); + h.encode(bout); + bout.write(spnegoToken); + byte[] token = bout.toByteArray(); + + // and feeds it to a GSS acceptor + GSSManager man = GSSManager.getInstance(); + GSSContext ctxt = man.createContext((GSSCredential) null); + token = ctxt.acceptSecContext(token, 0, token.length); + NegTokenTarg targ = new NegTokenTarg(token); + + // Make sure it's a GO-ON message + Method m2 = NegTokenTarg.class.getDeclaredMethod("getNegotiatedResult"); + m2.setAccessible(true); + int negResult = (int)m2.invoke(targ); + + if (negResult != 1 /* ACCEPT_INCOMPLETE */) { + throw new Exception("Not a continue"); + } + } +} --- ./jdk/test/sun/security/krb5/TimeInCCache.java Mon Dec 08 12:29:42 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -/* - * @test - * @bug 6590930 - * @run main/othervm TimeInCCache - * @summary read/write does not match for ccache - */ - -import java.io.ByteArrayInputStream; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import sun.security.krb5.internal.ccache.CCacheInputStream; -import sun.security.krb5.internal.ccache.Credentials; - -public class TimeInCCache { - public static void main(String[] args) throws Exception { - // A trivial cache file, with startdate and renewTill being zero. 
- // The endtime is set to sometime in year 2022, so that isValid() - // will always check starttime. - byte[] ccache = new byte[]{ - 5, 4, 0, 12, 0, 1, 0, 8, -1, -1, -1, 19, -1, -2, 89, 51, - 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 10, 77, 65, 88, 73, - 46, 76, 79, 67, 65, 76, 0, 0, 0, 5, 100, 117, 109, 109, 121, 0, - 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 10, 77, 65, 88, 73, 46, - 76, 79, 67, 65, 76, 0, 0, 0, 5, 100, 117, 109, 109, 121, 0, 0, - 0, 0, 0, 0, 0, 2, 0, 0, 0, 10, 77, 65, 88, 73, 46, 76, - 79, 67, 65, 76, 0, 0, 0, 6, 107, 114, 98, 116, 103, 116, 0, 0, - 0, 10, 77, 65, 88, 73, 46, 76, 79, 67, 65, 76, 0, 17, 0, 0, - 0, 16, -78, -85, -90, -50, -68, 115, 68, 8, -39, -109, 91, 61, -17, -27, - -122, -120, 71, 69, 16, -121, 0, 0, 0, 0, 98, 69, 16, -121, 0, 0, - 0, 0, 0, 64, -32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 97, -127, -3, 48, -127, -6, -96, 3, 2, 1, 5, -95, 12, - 27, 10, 77, 65, 88, 73, 46, 76, 79, 67, 65, 76, -94, 31, 48, 29, - -96, 3, 2, 1, 0, -95, 22, 48, 20, 27, 6, 107, 114, 98, 116, 103, - 116, 27, 10, 77, 65, 88, 73, 46, 76, 79, 67, 65, 76, -93, -127, -61, - 48, -127, -64, -96, 3, 2, 1, 17, -95, 3, 2, 1, 1, -94, -127, -77, - 4, -127, -80, 43, 65, -66, 34, 21, -34, 37, 35, 32, 50, -14, 122, 77, - -3, -29, 37, 99, 50, 125, -43, -96, -78, 85, 23, 41, -80, 68, 2, -109, - -27, 38, -41, -72, -32, 127, 63, -76, -22, 81, 33, -114, -30, 104, 125, -81, - -29, 70, -25, 23, 100, -75, -25, 62, -120, -78, -61, -100, -74, 50, -117, -127, - -16, 79, -106, 62, -39, 91, 100, -10, 23, -88, -18, -47, 51, -19, 113, 18, - 98, -101, 31, 98, 22, -81, 11, -41, -42, 67, 87, 92, -2, 42, -54, 79, - 49, -90, 43, -37, 90, -102, 125, 62, -88, -77, 100, 102, 23, -57, -51, 38, - 68, -44, -57, -102, 103, -6, 85, -58, 74, -117, -87, 67, -103, -36, 110, -122, - 115, 12, 118, -106, -114, -51, 79, 68, 32, -91, -53, -5, -51, 89, 72, 70, - 123, -12, -95, 9, 40, -30, -117, 74, 77, 38, 91, 126, -82, 17, 98, 98, - -49, 78, 36, 36, 103, -76, -100, -23, 118, -92, -8, 80, 103, -23, -98, 56, - 21, 65, -77, 0, 0, 0, 0 - }; - System.setProperty("sun.security.krb5.debug", "true"); // test code changes in DEBUG - CCacheInputStream cis = new CCacheInputStream(new ByteArrayInputStream(ccache)); - cis.readVersion(); - cis.readTag(); - cis.readPrincipal(0x504); - Method m = CCacheInputStream.class.getDeclaredMethod("readCred", Integer.TYPE); - m.setAccessible(true); - Credentials c = (Credentials) m.invoke(cis, new Integer(0x504)); - sun.security.krb5.Credentials cc = c.setKrbCreds(); - - // 1. Make sure starttime is still null - if (cc.getStartTime() != null) { - throw new Exception("Fail, starttime should be zero here"); - } - - // 2. Make sure renewTill is still null - if (cc.getRenewTill() != null) { - throw new Exception("Fail, renewTill should be zero here"); - } - - // 3. 
Make sure isValid works - c.isValid(); - } -} --- ./jdk/test/sun/security/krb5/auto/S4U2proxy.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/krb5/auto/S4U2proxy.java Wed Feb 04 12:14:43 2015 -0800 @@ -23,7 +23,7 @@ /* * @test - * @bug 6355584 + * @bug 6355584 8044215 * @summary Introduce constrained Kerberos delegation * @compile -XDignore.symbol.file S4U2proxy.java * @run main/othervm S4U2proxy krb5 @@ -69,6 +69,10 @@ Context p = s.delegated(); p.startAsClient(OneKDC.BACKEND, mech); + + // 8044215: requestCredDeleg is useless and harmless + p.x().requestCredDeleg(true); + b.startAsServer(mech); Context.handshake(p, b); --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ ./jdk/test/sun/security/krb5/ccache/CorruptedCC.java Wed Feb 04 12:14:43 2015 -0800 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028780 + * @summary JDK KRB5 module throws OutOfMemoryError when CCache is corrupt + * @run main/othervm -Xmx8m CorruptedCC + */ +import java.nio.file.Files; +import java.nio.file.Paths; +import sun.security.krb5.internal.ccache.CredentialsCache; + +public class CorruptedCC { + public static void main(String[] args) throws Exception { + for (int i=0; i cardTerminals = terminalFactory.terminals().list(); + System.out.println("Terminals: " + cardTerminals); + if (cardTerminals.isEmpty()) { + throw new Exception("No card terminals available"); + } + CardTerminal cardTerminal = cardTerminals.get(0); + Card card = cardTerminal.connect("DIRECT"); + card.disconnect(true); + + System.out.println("OK."); + } +} --- ./jdk/test/sun/security/smartcardio/TestExclusive.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/smartcardio/TestExclusive.java Wed Feb 04 12:14:43 2015 -0800 @@ -30,10 +30,11 @@ * @run main/manual TestExclusive */ -import java.io.*; -import java.util.*; - -import javax.smartcardio.*; +import javax.smartcardio.Card; +import javax.smartcardio.CardChannel; +import javax.smartcardio.CardException; +import javax.smartcardio.CardTerminal; +import javax.smartcardio.CommandAPDU; public class TestExclusive extends Utils { @@ -83,9 +84,9 @@ Thread.sleep(1000); // disconnect - card.disconnect(false); + card.disconnect(true); - if (otherOK == false) { + if (! 
otherOK) { throw new Exception("Secondary thread failed"); } --- ./jdk/test/sun/security/smartcardio/TestMultiplePresent.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/smartcardio/TestMultiplePresent.java Wed Feb 04 12:14:43 2015 -0800 @@ -30,10 +30,10 @@ * @run main/manual TestPresent */ -import java.io.*; -import java.util.*; - -import javax.smartcardio.*; +import java.util.List; +import javax.smartcardio.CardTerminal; +import javax.smartcardio.CardTerminals; +import javax.smartcardio.TerminalFactory; import static javax.smartcardio.CardTerminals.State.*; public class TestMultiplePresent { --- ./jdk/test/sun/security/smartcardio/TestPresent.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/smartcardio/TestPresent.java Wed Feb 04 12:14:43 2015 -0800 @@ -30,10 +30,9 @@ * @run main/manual TestPresent */ -import java.io.*; -import java.util.*; - -import javax.smartcardio.*; +import java.util.List; +import javax.smartcardio.CardTerminal; +import javax.smartcardio.TerminalFactory; public class TestPresent { --- ./jdk/test/sun/security/smartcardio/TestTransmit.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/smartcardio/TestTransmit.java Wed Feb 04 12:14:43 2015 -0800 @@ -30,10 +30,16 @@ * @run main/manual TestTransmit */ -import java.io.*; -import java.util.*; - -import javax.smartcardio.*; +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.StringReader; +import javax.smartcardio.Card; +import javax.smartcardio.CardChannel; +import javax.smartcardio.CardTerminal; +import javax.smartcardio.CommandAPDU; +import javax.smartcardio.ResponseAPDU; public class TestTransmit extends Utils { @@ -78,7 +84,7 @@ } // disconnect - card.disconnect(false); + card.disconnect(true); System.out.println("OK."); } --- ./jdk/test/sun/security/smartcardio/Utils.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/security/smartcardio/Utils.java Wed Feb 04 12:14:43 2015 -0800 @@ -24,10 +24,16 @@ // common utility functions for the PC/SC tests -import javax.smartcardio.*; - -import java.io.*; -import java.util.*; +import java.io.StringReader; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import javax.smartcardio.CardTerminal; +import javax.smartcardio.CardChannel; +import javax.smartcardio.ResponseAPDU; +import javax.smartcardio.CommandAPDU; +import javax.smartcardio.TerminalFactory; public class Utils { --- ./jdk/test/sun/text/resources/LocaleData Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/text/resources/LocaleData Wed Feb 04 12:14:43 2015 -0800 @@ -2502,7 +2502,7 @@ CalendarData/pl_PL/minimalDaysInFirstWeek=4 CalendarData/pt_PT/minimalDaysInFirstWeek=4 -#bug 4945388 +#bug 4945388 CurrencyNames/be_BY/BYR=\u0420\u0443\u0431 CurrencyNames/bg_BG/BGN=\u043B\u0432. @@ -5422,7 +5422,7 @@ FormatData/en_SG/DatePatterns/2=MMM d, yyyy FormatData/en_SG/DatePatterns/3=M/d/yy FormatData/en_SG/DateTimePatterns/0={1} {0} -# Use approved data +# Use approved data FormatData/ms/Eras/0=BCE FormatData/ms/Eras/1=CE FormatData/sr_BA/MonthNames/5=\u0458\u0443\u043d\u0438 @@ -5571,7 +5571,7 @@ FormatData/fi/AmPmMarkers/0=ap. FormatData/fi/AmPmMarkers/1=ip. 
-# bug 6507067 +# bug 6507067 TimeZoneNames/zh_TW/Asia\/Taipei/1=\u53f0\u7063\u6a19\u6e96\u6642\u9593 TimeZoneNames/zh_TW/Asia\/Taipei/2=TST @@ -7702,3 +7702,577 @@ # bug 8055222 CurrencyNames/lt_LT/EUR=\u20AC + +# bug 8042126 + missing MonthNarrows data +FormatData//MonthNarrows/0=1 +FormatData//MonthNarrows/1=2 +FormatData//MonthNarrows/2=3 +FormatData//MonthNarrows/3=4 +FormatData//MonthNarrows/4=5 +FormatData//MonthNarrows/5=6 +FormatData//MonthNarrows/6=7 +FormatData//MonthNarrows/7=8 +FormatData//MonthNarrows/8=9 +FormatData//MonthNarrows/9=10 +FormatData//MonthNarrows/10=11 +FormatData//MonthNarrows/11=12 +FormatData//MonthNarrows/12= +FormatData/bg/MonthNarrows/0=\u044f +FormatData/bg/MonthNarrows/1=\u0444 +FormatData/bg/MonthNarrows/2=\u043c +FormatData/bg/MonthNarrows/3=\u0430 +FormatData/bg/MonthNarrows/4=\u043c +FormatData/bg/MonthNarrows/5=\u044e +FormatData/bg/MonthNarrows/6=\u044e +FormatData/bg/MonthNarrows/7=\u0430 +FormatData/bg/MonthNarrows/8=\u0441 +FormatData/bg/MonthNarrows/9=\u043e +FormatData/bg/MonthNarrows/10=\u043d +FormatData/bg/MonthNarrows/11=\u0434 +FormatData/bg/MonthNarrows/12= +FormatData/zh_TW/MonthNarrows/0=1 +FormatData/zh_TW/MonthNarrows/1=2 +FormatData/zh_TW/MonthNarrows/2=3 +FormatData/zh_TW/MonthNarrows/3=4 +FormatData/zh_TW/MonthNarrows/4=5 +FormatData/zh_TW/MonthNarrows/5=6 +FormatData/zh_TW/MonthNarrows/6=7 +FormatData/zh_TW/MonthNarrows/7=8 +FormatData/zh_TW/MonthNarrows/8=9 +FormatData/zh_TW/MonthNarrows/9=10 +FormatData/zh_TW/MonthNarrows/10=11 +FormatData/zh_TW/MonthNarrows/11=12 +FormatData/zh_TW/MonthNarrows/12= +FormatData/it/MonthNarrows/0=G +FormatData/it/MonthNarrows/1=F +FormatData/it/MonthNarrows/2=M +FormatData/it/MonthNarrows/3=A +FormatData/it/MonthNarrows/4=M +FormatData/it/MonthNarrows/5=G +FormatData/it/MonthNarrows/6=L +FormatData/it/MonthNarrows/7=A +FormatData/it/MonthNarrows/8=S +FormatData/it/MonthNarrows/9=O +FormatData/it/MonthNarrows/10=N +FormatData/it/MonthNarrows/11=D +FormatData/it/MonthNarrows/12= +FormatData/ko/MonthNarrows/0=1\uc6d4 +FormatData/ko/MonthNarrows/1=2\uc6d4 +FormatData/ko/MonthNarrows/2=3\uc6d4 +FormatData/ko/MonthNarrows/3=4\uc6d4 +FormatData/ko/MonthNarrows/4=5\uc6d4 +FormatData/ko/MonthNarrows/5=6\uc6d4 +FormatData/ko/MonthNarrows/6=7\uc6d4 +FormatData/ko/MonthNarrows/7=8\uc6d4 +FormatData/ko/MonthNarrows/8=9\uc6d4 +FormatData/ko/MonthNarrows/9=10\uc6d4 +FormatData/ko/MonthNarrows/10=11\uc6d4 +FormatData/ko/MonthNarrows/11=12\uc6d4 +FormatData/ko/MonthNarrows/12= +FormatData/uk/MonthNarrows/0=\u0421 +FormatData/uk/MonthNarrows/1=\u041b +FormatData/uk/MonthNarrows/2=\u0411 +FormatData/uk/MonthNarrows/3=\u041a +FormatData/uk/MonthNarrows/4=\u0422 +FormatData/uk/MonthNarrows/5=\u0427 +FormatData/uk/MonthNarrows/6=\u041b +FormatData/uk/MonthNarrows/7=\u0421 +FormatData/uk/MonthNarrows/8=\u0412 +FormatData/uk/MonthNarrows/9=\u0416 +FormatData/uk/MonthNarrows/10=\u041b +FormatData/uk/MonthNarrows/11=\u0413 +FormatData/uk/MonthNarrows/12= +FormatData/lv/MonthNarrows/0=J +FormatData/lv/MonthNarrows/1=F +FormatData/lv/MonthNarrows/2=M +FormatData/lv/MonthNarrows/3=A +FormatData/lv/MonthNarrows/4=M +FormatData/lv/MonthNarrows/5=J +FormatData/lv/MonthNarrows/6=J +FormatData/lv/MonthNarrows/7=A +FormatData/lv/MonthNarrows/8=S +FormatData/lv/MonthNarrows/9=O +FormatData/lv/MonthNarrows/10=N +FormatData/lv/MonthNarrows/11=D +FormatData/lv/MonthNarrows/12= +FormatData/pt/MonthNarrows/0=J +FormatData/pt/MonthNarrows/1=F +FormatData/pt/MonthNarrows/2=M +FormatData/pt/MonthNarrows/3=A +FormatData/pt/MonthNarrows/4=M 
+FormatData/pt/MonthNarrows/5=J +FormatData/pt/MonthNarrows/6=J +FormatData/pt/MonthNarrows/7=A +FormatData/pt/MonthNarrows/8=S +FormatData/pt/MonthNarrows/9=O +FormatData/pt/MonthNarrows/10=N +FormatData/pt/MonthNarrows/11=D +FormatData/pt/MonthNarrows/12= +FormatData/sk/MonthNarrows/0=j +FormatData/sk/MonthNarrows/1=f +FormatData/sk/MonthNarrows/2=m +FormatData/sk/MonthNarrows/3=a +FormatData/sk/MonthNarrows/4=m +FormatData/sk/MonthNarrows/5=j +FormatData/sk/MonthNarrows/6=j +FormatData/sk/MonthNarrows/7=a +FormatData/sk/MonthNarrows/8=s +FormatData/sk/MonthNarrows/9=o +FormatData/sk/MonthNarrows/10=n +FormatData/sk/MonthNarrows/11=d +FormatData/sk/MonthNarrows/12= +FormatData/hi_IN/MonthNarrows/0=\u091c +FormatData/hi_IN/MonthNarrows/1=\u092b\u093c +FormatData/hi_IN/MonthNarrows/2=\u092e\u093e +FormatData/hi_IN/MonthNarrows/3=\u0905 +FormatData/hi_IN/MonthNarrows/4=\u092e +FormatData/hi_IN/MonthNarrows/5=\u091c\u0942 +FormatData/hi_IN/MonthNarrows/6=\u091c\u0941 +FormatData/hi_IN/MonthNarrows/7=\u0905 +FormatData/hi_IN/MonthNarrows/8=\u0938\u093f +FormatData/hi_IN/MonthNarrows/9=\u0905 +FormatData/hi_IN/MonthNarrows/10=\u0928 +FormatData/hi_IN/MonthNarrows/11=\u0926\u093f +FormatData/hi_IN/MonthNarrows/12= +FormatData/ga/MonthNarrows/0=E +FormatData/ga/MonthNarrows/1=F +FormatData/ga/MonthNarrows/2=M +FormatData/ga/MonthNarrows/3=A +FormatData/ga/MonthNarrows/4=B +FormatData/ga/MonthNarrows/5=M +FormatData/ga/MonthNarrows/6=I +FormatData/ga/MonthNarrows/7=L +FormatData/ga/MonthNarrows/8=M +FormatData/ga/MonthNarrows/9=D +FormatData/ga/MonthNarrows/10=S +FormatData/ga/MonthNarrows/11=N +FormatData/ga/MonthNarrows/12= +FormatData/et/MonthNarrows/0=J +FormatData/et/MonthNarrows/1=V +FormatData/et/MonthNarrows/2=M +FormatData/et/MonthNarrows/3=A +FormatData/et/MonthNarrows/4=M +FormatData/et/MonthNarrows/5=J +FormatData/et/MonthNarrows/6=J +FormatData/et/MonthNarrows/7=A +FormatData/et/MonthNarrows/8=S +FormatData/et/MonthNarrows/9=O +FormatData/et/MonthNarrows/10=N +FormatData/et/MonthNarrows/11=D +FormatData/et/MonthNarrows/12= +FormatData/sv/MonthNarrows/0=J +FormatData/sv/MonthNarrows/1=F +FormatData/sv/MonthNarrows/2=M +FormatData/sv/MonthNarrows/3=A +FormatData/sv/MonthNarrows/4=M +FormatData/sv/MonthNarrows/5=J +FormatData/sv/MonthNarrows/6=J +FormatData/sv/MonthNarrows/7=A +FormatData/sv/MonthNarrows/8=S +FormatData/sv/MonthNarrows/9=O +FormatData/sv/MonthNarrows/10=N +FormatData/sv/MonthNarrows/11=D +FormatData/sv/MonthNarrows/12= +FormatData/cs/MonthNarrows/0=l +FormatData/cs/MonthNarrows/1=\u00fa +FormatData/cs/MonthNarrows/2=b +FormatData/cs/MonthNarrows/3=d +FormatData/cs/MonthNarrows/4=k +FormatData/cs/MonthNarrows/5=\u010d +FormatData/cs/MonthNarrows/6=\u010d +FormatData/cs/MonthNarrows/7=s +FormatData/cs/MonthNarrows/8=z +FormatData/cs/MonthNarrows/9=\u0159 +FormatData/cs/MonthNarrows/10=l +FormatData/cs/MonthNarrows/11=p +FormatData/cs/MonthNarrows/12= +FormatData/el/MonthNarrows/0=\u0399 +FormatData/el/MonthNarrows/1=\u03a6 +FormatData/el/MonthNarrows/2=\u039c +FormatData/el/MonthNarrows/3=\u0391 +FormatData/el/MonthNarrows/4=\u039c +FormatData/el/MonthNarrows/5=\u0399 +FormatData/el/MonthNarrows/6=\u0399 +FormatData/el/MonthNarrows/7=\u0391 +FormatData/el/MonthNarrows/8=\u03a3 +FormatData/el/MonthNarrows/9=\u039f +FormatData/el/MonthNarrows/10=\u039d +FormatData/el/MonthNarrows/11=\u0394 +FormatData/el/MonthNarrows/12= +FormatData/hu/MonthNarrows/0=J +FormatData/hu/MonthNarrows/1=F +FormatData/hu/MonthNarrows/2=M +FormatData/hu/MonthNarrows/3=\u00c1 
+FormatData/hu/MonthNarrows/4=M +FormatData/hu/MonthNarrows/5=J +FormatData/hu/MonthNarrows/6=J +FormatData/hu/MonthNarrows/7=A +FormatData/hu/MonthNarrows/8=Sz +FormatData/hu/MonthNarrows/9=O +FormatData/hu/MonthNarrows/10=N +FormatData/hu/MonthNarrows/11=D +FormatData/hu/MonthNarrows/12= +FormatData/es/MonthNarrows/0=E +FormatData/es/MonthNarrows/1=F +FormatData/es/MonthNarrows/2=M +FormatData/es/MonthNarrows/3=A +FormatData/es/MonthNarrows/4=M +FormatData/es/MonthNarrows/5=J +FormatData/es/MonthNarrows/6=J +FormatData/es/MonthNarrows/7=A +FormatData/es/MonthNarrows/8=S +FormatData/es/MonthNarrows/9=O +FormatData/es/MonthNarrows/10=N +FormatData/es/MonthNarrows/11=D +FormatData/es/MonthNarrows/12= +FormatData/tr/MonthNarrows/0=O +FormatData/tr/MonthNarrows/1=\u015e +FormatData/tr/MonthNarrows/2=M +FormatData/tr/MonthNarrows/3=N +FormatData/tr/MonthNarrows/4=M +FormatData/tr/MonthNarrows/5=H +FormatData/tr/MonthNarrows/6=T +FormatData/tr/MonthNarrows/7=A +FormatData/tr/MonthNarrows/8=E +FormatData/tr/MonthNarrows/9=E +FormatData/tr/MonthNarrows/10=K +FormatData/tr/MonthNarrows/11=A +FormatData/tr/MonthNarrows/12= +FormatData/hr/MonthNarrows/0=1. +FormatData/hr/MonthNarrows/1=2. +FormatData/hr/MonthNarrows/2=3. +FormatData/hr/MonthNarrows/3=4. +FormatData/hr/MonthNarrows/4=5. +FormatData/hr/MonthNarrows/5=6. +FormatData/hr/MonthNarrows/6=7. +FormatData/hr/MonthNarrows/7=8. +FormatData/hr/MonthNarrows/8=9. +FormatData/hr/MonthNarrows/9=10. +FormatData/hr/MonthNarrows/10=11. +FormatData/hr/MonthNarrows/11=12. +FormatData/hr/MonthNarrows/12= +FormatData/lt/MonthNarrows/0=S +FormatData/lt/MonthNarrows/1=V +FormatData/lt/MonthNarrows/2=K +FormatData/lt/MonthNarrows/3=B +FormatData/lt/MonthNarrows/4=G +FormatData/lt/MonthNarrows/5=B +FormatData/lt/MonthNarrows/6=L +FormatData/lt/MonthNarrows/7=R +FormatData/lt/MonthNarrows/8=R +FormatData/lt/MonthNarrows/9=S +FormatData/lt/MonthNarrows/10=L +FormatData/lt/MonthNarrows/11=G +FormatData/lt/MonthNarrows/12= +FormatData/sq/MonthNarrows/0=J +FormatData/sq/MonthNarrows/1=S +FormatData/sq/MonthNarrows/2=M +FormatData/sq/MonthNarrows/3=P +FormatData/sq/MonthNarrows/4=M +FormatData/sq/MonthNarrows/5=Q +FormatData/sq/MonthNarrows/6=K +FormatData/sq/MonthNarrows/7=G +FormatData/sq/MonthNarrows/8=S +FormatData/sq/MonthNarrows/9=T +FormatData/sq/MonthNarrows/10=N +FormatData/sq/MonthNarrows/11=D +FormatData/sq/MonthNarrows/12= +FormatData/fr/MonthNarrows/0=J +FormatData/fr/MonthNarrows/1=F +FormatData/fr/MonthNarrows/2=M +FormatData/fr/MonthNarrows/3=A +FormatData/fr/MonthNarrows/4=M +FormatData/fr/MonthNarrows/5=J +FormatData/fr/MonthNarrows/6=J +FormatData/fr/MonthNarrows/7=A +FormatData/fr/MonthNarrows/8=S +FormatData/fr/MonthNarrows/9=O +FormatData/fr/MonthNarrows/10=N +FormatData/fr/MonthNarrows/11=D +FormatData/fr/MonthNarrows/12= +FormatData/is/MonthNarrows/0=J +FormatData/is/MonthNarrows/1=F +FormatData/is/MonthNarrows/2=M +FormatData/is/MonthNarrows/3=A +FormatData/is/MonthNarrows/4=M +FormatData/is/MonthNarrows/5=J +FormatData/is/MonthNarrows/6=J +FormatData/is/MonthNarrows/7=\u00c1 +FormatData/is/MonthNarrows/8=L +FormatData/is/MonthNarrows/9=O +FormatData/is/MonthNarrows/10=N +FormatData/is/MonthNarrows/11=D +FormatData/is/MonthNarrows/12= +FormatData/de/MonthNarrows/0=J +FormatData/de/MonthNarrows/1=F +FormatData/de/MonthNarrows/2=M +FormatData/de/MonthNarrows/3=A +FormatData/de/MonthNarrows/4=M +FormatData/de/MonthNarrows/5=J +FormatData/de/MonthNarrows/6=J +FormatData/de/MonthNarrows/7=A +FormatData/de/MonthNarrows/8=S 
+FormatData/de/MonthNarrows/9=O +FormatData/de/MonthNarrows/10=N +FormatData/de/MonthNarrows/11=D +FormatData/de/MonthNarrows/12= +FormatData/en/MonthNarrows/0=J +FormatData/en/MonthNarrows/1=F +FormatData/en/MonthNarrows/2=M +FormatData/en/MonthNarrows/3=A +FormatData/en/MonthNarrows/4=M +FormatData/en/MonthNarrows/5=J +FormatData/en/MonthNarrows/6=J +FormatData/en/MonthNarrows/7=A +FormatData/en/MonthNarrows/8=S +FormatData/en/MonthNarrows/9=O +FormatData/en/MonthNarrows/10=N +FormatData/en/MonthNarrows/11=D +FormatData/en/MonthNarrows/12= +FormatData/ca/MonthNarrows/0=G +FormatData/ca/MonthNarrows/1=F +FormatData/ca/MonthNarrows/2=M +FormatData/ca/MonthNarrows/3=A +FormatData/ca/MonthNarrows/4=M +FormatData/ca/MonthNarrows/5=J +FormatData/ca/MonthNarrows/6=G +FormatData/ca/MonthNarrows/7=A +FormatData/ca/MonthNarrows/8=S +FormatData/ca/MonthNarrows/9=O +FormatData/ca/MonthNarrows/10=N +FormatData/ca/MonthNarrows/11=D +FormatData/ca/MonthNarrows/12= +FormatData/sl/MonthNarrows/0=j +FormatData/sl/MonthNarrows/1=f +FormatData/sl/MonthNarrows/2=m +FormatData/sl/MonthNarrows/3=a +FormatData/sl/MonthNarrows/4=m +FormatData/sl/MonthNarrows/5=j +FormatData/sl/MonthNarrows/6=j +FormatData/sl/MonthNarrows/7=a +FormatData/sl/MonthNarrows/8=s +FormatData/sl/MonthNarrows/9=o +FormatData/sl/MonthNarrows/10=n +FormatData/sl/MonthNarrows/11=d +FormatData/sl/MonthNarrows/12= +FormatData/fi/MonthNarrows/0=T +FormatData/fi/MonthNarrows/1=H +FormatData/fi/MonthNarrows/2=M +FormatData/fi/MonthNarrows/3=H +FormatData/fi/MonthNarrows/4=T +FormatData/fi/MonthNarrows/5=K +FormatData/fi/MonthNarrows/6=H +FormatData/fi/MonthNarrows/7=E +FormatData/fi/MonthNarrows/8=S +FormatData/fi/MonthNarrows/9=L +FormatData/fi/MonthNarrows/10=M +FormatData/fi/MonthNarrows/11=J +FormatData/fi/MonthNarrows/12= +FormatData/mk/MonthNarrows/0=\u0458 +FormatData/mk/MonthNarrows/1=\u0444 +FormatData/mk/MonthNarrows/2=\u043c +FormatData/mk/MonthNarrows/3=\u0430 +FormatData/mk/MonthNarrows/4=\u043c +FormatData/mk/MonthNarrows/5=\u0458 +FormatData/mk/MonthNarrows/6=\u0458 +FormatData/mk/MonthNarrows/7=\u0430 +FormatData/mk/MonthNarrows/8=\u0441 +FormatData/mk/MonthNarrows/9=\u043e +FormatData/mk/MonthNarrows/10=\u043d +FormatData/mk/MonthNarrows/11=\u0434 +FormatData/mk/MonthNarrows/12= +FormatData/sr-Latn/MonthNarrows/0=j +FormatData/sr-Latn/MonthNarrows/1=f +FormatData/sr-Latn/MonthNarrows/2=m +FormatData/sr-Latn/MonthNarrows/3=a +FormatData/sr-Latn/MonthNarrows/4=m +FormatData/sr-Latn/MonthNarrows/5=j +FormatData/sr-Latn/MonthNarrows/6=j +FormatData/sr-Latn/MonthNarrows/7=a +FormatData/sr-Latn/MonthNarrows/8=s +FormatData/sr-Latn/MonthNarrows/9=o +FormatData/sr-Latn/MonthNarrows/10=n +FormatData/sr-Latn/MonthNarrows/11=d +FormatData/sr-Latn/MonthNarrows/12= +FormatData/th/MonthNarrows/0=\u0e21.\u0e04. +FormatData/th/MonthNarrows/1=\u0e01.\u0e1e. +FormatData/th/MonthNarrows/2=\u0e21\u0e35.\u0e04. +FormatData/th/MonthNarrows/3=\u0e40\u0e21.\u0e22. +FormatData/th/MonthNarrows/4=\u0e1e.\u0e04. +FormatData/th/MonthNarrows/5=\u0e21\u0e34.\u0e22 +FormatData/th/MonthNarrows/6=\u0e01.\u0e04. +FormatData/th/MonthNarrows/7=\u0e2a.\u0e04. +FormatData/th/MonthNarrows/8=\u0e01.\u0e22. +FormatData/th/MonthNarrows/9=\u0e15.\u0e04. +FormatData/th/MonthNarrows/10=\u0e1e.\u0e22. +FormatData/th/MonthNarrows/11=\u0e18.\u0e04. 
+FormatData/th/MonthNarrows/12= +FormatData/ar/MonthNarrows/0=\u064a +FormatData/ar/MonthNarrows/1=\u0641 +FormatData/ar/MonthNarrows/2=\u0645 +FormatData/ar/MonthNarrows/3=\u0623 +FormatData/ar/MonthNarrows/4=\u0648 +FormatData/ar/MonthNarrows/5=\u0646 +FormatData/ar/MonthNarrows/6=\u0644 +FormatData/ar/MonthNarrows/7=\u063a +FormatData/ar/MonthNarrows/8=\u0633 +FormatData/ar/MonthNarrows/9=\u0643 +FormatData/ar/MonthNarrows/10=\u0628 +FormatData/ar/MonthNarrows/11=\u062f +FormatData/ar/MonthNarrows/12= +FormatData/ru/MonthNarrows/0=\u042f +FormatData/ru/MonthNarrows/1=\u0424 +FormatData/ru/MonthNarrows/2=\u041c +FormatData/ru/MonthNarrows/3=\u0410 +FormatData/ru/MonthNarrows/4=\u041c +FormatData/ru/MonthNarrows/5=\u0418 +FormatData/ru/MonthNarrows/6=\u0418 +FormatData/ru/MonthNarrows/7=\u0410 +FormatData/ru/MonthNarrows/8=\u0421 +FormatData/ru/MonthNarrows/9=\u041e +FormatData/ru/MonthNarrows/10=\u041d +FormatData/ru/MonthNarrows/11=\u0414 +FormatData/ru/MonthNarrows/12= +FormatData/ms/MonthNarrows/0=J +FormatData/ms/MonthNarrows/1=F +FormatData/ms/MonthNarrows/2=M +FormatData/ms/MonthNarrows/3=A +FormatData/ms/MonthNarrows/4=M +FormatData/ms/MonthNarrows/5=J +FormatData/ms/MonthNarrows/6=J +FormatData/ms/MonthNarrows/7=O +FormatData/ms/MonthNarrows/8=S +FormatData/ms/MonthNarrows/9=O +FormatData/ms/MonthNarrows/10=N +FormatData/ms/MonthNarrows/11=D +FormatData/ms/MonthNarrows/12= +FormatData/nl/MonthNarrows/0=J +FormatData/nl/MonthNarrows/1=F +FormatData/nl/MonthNarrows/2=M +FormatData/nl/MonthNarrows/3=A +FormatData/nl/MonthNarrows/4=M +FormatData/nl/MonthNarrows/5=J +FormatData/nl/MonthNarrows/6=J +FormatData/nl/MonthNarrows/7=A +FormatData/nl/MonthNarrows/8=S +FormatData/nl/MonthNarrows/9=O +FormatData/nl/MonthNarrows/10=N +FormatData/nl/MonthNarrows/11=D +FormatData/nl/MonthNarrows/12= +FormatData/vi/MonthNarrows/0=1 +FormatData/vi/MonthNarrows/1=2 +FormatData/vi/MonthNarrows/2=3 +FormatData/vi/MonthNarrows/3=4 +FormatData/vi/MonthNarrows/4=5 +FormatData/vi/MonthNarrows/5=6 +FormatData/vi/MonthNarrows/6=7 +FormatData/vi/MonthNarrows/7=8 +FormatData/vi/MonthNarrows/8=9 +FormatData/vi/MonthNarrows/9=10 +FormatData/vi/MonthNarrows/10=11 +FormatData/vi/MonthNarrows/11=12 +FormatData/vi/MonthNarrows/12= +FormatData/sr/MonthNarrows/0=\u0458 +FormatData/sr/MonthNarrows/1=\u0444 +FormatData/sr/MonthNarrows/2=\u043c +FormatData/sr/MonthNarrows/3=\u0430 +FormatData/sr/MonthNarrows/4=\u043c +FormatData/sr/MonthNarrows/5=\u0458 +FormatData/sr/MonthNarrows/6=\u0458 +FormatData/sr/MonthNarrows/7=\u0430 +FormatData/sr/MonthNarrows/8=\u0441 +FormatData/sr/MonthNarrows/9=\u043e +FormatData/sr/MonthNarrows/10=\u043d +FormatData/sr/MonthNarrows/11=\u0434 +FormatData/sr/MonthNarrows/12= +FormatData/mt/MonthNarrows/0=J +FormatData/mt/MonthNarrows/1=F +FormatData/mt/MonthNarrows/2=M +FormatData/mt/MonthNarrows/3=A +FormatData/mt/MonthNarrows/4=M +FormatData/mt/MonthNarrows/5=\u0120 +FormatData/mt/MonthNarrows/6=L +FormatData/mt/MonthNarrows/7=A +FormatData/mt/MonthNarrows/8=S +FormatData/mt/MonthNarrows/9=O +FormatData/mt/MonthNarrows/10=N +FormatData/mt/MonthNarrows/11=D +FormatData/mt/MonthNarrows/12= +FormatData/da/MonthNarrows/0=J +FormatData/da/MonthNarrows/1=F +FormatData/da/MonthNarrows/2=M +FormatData/da/MonthNarrows/3=A +FormatData/da/MonthNarrows/4=M +FormatData/da/MonthNarrows/5=J +FormatData/da/MonthNarrows/6=J +FormatData/da/MonthNarrows/7=A +FormatData/da/MonthNarrows/8=S +FormatData/da/MonthNarrows/9=O +FormatData/da/MonthNarrows/10=N +FormatData/da/MonthNarrows/11=D 
+FormatData/da/MonthNarrows/12= +FormatData/ro/MonthNarrows/0=I +FormatData/ro/MonthNarrows/1=F +FormatData/ro/MonthNarrows/2=M +FormatData/ro/MonthNarrows/3=A +FormatData/ro/MonthNarrows/4=M +FormatData/ro/MonthNarrows/5=I +FormatData/ro/MonthNarrows/6=I +FormatData/ro/MonthNarrows/7=A +FormatData/ro/MonthNarrows/8=S +FormatData/ro/MonthNarrows/9=O +FormatData/ro/MonthNarrows/10=N +FormatData/ro/MonthNarrows/11=D +FormatData/ro/MonthNarrows/12= +FormatData/no/MonthNarrows/0=J +FormatData/no/MonthNarrows/1=F +FormatData/no/MonthNarrows/2=M +FormatData/no/MonthNarrows/3=A +FormatData/no/MonthNarrows/4=M +FormatData/no/MonthNarrows/5=J +FormatData/no/MonthNarrows/6=J +FormatData/no/MonthNarrows/7=A +FormatData/no/MonthNarrows/8=S +FormatData/no/MonthNarrows/9=O +FormatData/no/MonthNarrows/10=N +FormatData/no/MonthNarrows/11=D +FormatData/no/MonthNarrows/12= +FormatData/pl/MonthNarrows/0=s +FormatData/pl/MonthNarrows/1=l +FormatData/pl/MonthNarrows/2=m +FormatData/pl/MonthNarrows/3=k +FormatData/pl/MonthNarrows/4=m +FormatData/pl/MonthNarrows/5=c +FormatData/pl/MonthNarrows/6=l +FormatData/pl/MonthNarrows/7=s +FormatData/pl/MonthNarrows/8=w +FormatData/pl/MonthNarrows/9=p +FormatData/pl/MonthNarrows/10=l +FormatData/pl/MonthNarrows/11=g +FormatData/pl/MonthNarrows/12= +FormatData/iw/MonthNarrows/0=1 +FormatData/iw/MonthNarrows/1=2 +FormatData/iw/MonthNarrows/2=3 +FormatData/iw/MonthNarrows/3=4 +FormatData/iw/MonthNarrows/4=5 +FormatData/iw/MonthNarrows/5=6 +FormatData/iw/MonthNarrows/6=7 +FormatData/iw/MonthNarrows/7=8 +FormatData/iw/MonthNarrows/8=9 +FormatData/iw/MonthNarrows/9=10 +FormatData/iw/MonthNarrows/10=11 +FormatData/iw/MonthNarrows/11=12 +FormatData/iw/MonthNarrows/12= +FormatData/zh/MonthNarrows/0=1 +FormatData/zh/MonthNarrows/1=2 +FormatData/zh/MonthNarrows/2=3 +FormatData/zh/MonthNarrows/3=4 +FormatData/zh/MonthNarrows/4=5 +FormatData/zh/MonthNarrows/5=6 +FormatData/zh/MonthNarrows/6=7 +FormatData/zh/MonthNarrows/7=8 +FormatData/zh/MonthNarrows/8=9 +FormatData/zh/MonthNarrows/9=10 +FormatData/zh/MonthNarrows/10=11 +FormatData/zh/MonthNarrows/11=12 +FormatData/zh/MonthNarrows/12= --- ./jdk/test/sun/text/resources/LocaleDataTest.java Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/text/resources/LocaleDataTest.java Wed Feb 04 12:14:43 2015 -0800 @@ -36,7 +36,7 @@ * 6919624 6998391 7019267 7020960 7025837 7020583 7036905 7066203 7101495 * 7003124 7085757 7028073 7171028 7189611 8000983 7195759 8004489 8006509 * 7114053 7074882 7040556 8013836 8021121 6192407 6931564 8027695 7090826 - * 8017142 8037343 8055222 + * 8017142 8037343 8055222 8042126 * @summary Verify locale data * */ --- ./jdk/test/sun/tools/jrunscript/jrunscriptTest.sh Mon Dec 08 12:29:42 2014 -0800 +++ ./jdk/test/sun/tools/jrunscript/jrunscriptTest.sh Wed Feb 04 12:14:43 2015 -0800 @@ -42,7 +42,7 @@ rm -f jrunscriptTest.out 2>/dev/null ${JRUNSCRIPT} -J-Djava.awt.headless=true -l nashorn > jrunscriptTest.out 2>&1 <= 0) { + int sindex = index + launcherPidString.length(); + int tindex = sindex + line.substring(sindex).indexOf("'"); + System.out.println("DEBUG INFO: sindex = " + sindex); + System.out.println("DEBUG INFO: searching substring: " + line.substring(sindex)); + System.out.println("DEBUG INFO: tindex = " + tindex); + // DEBUG INFO + System.out.println(tr); + launcherPid = line.substring(sindex, tindex); + break; + } + } + if (launcherPid == null) { + System.out.println(tr); + throw new RuntimeException("Error: failed to find launcher Pid in launcher tracking info"); + } + + // did we create the env var with 
the correct pid? + if (!launcherPid.equals(envVarPid)) { + System.out.println(tr); + System.out.println("Error: wrong pid in creating env var"); + System.out.println("Error Info: launcherPid = " + launcherPid); + System.out.println("Error Info: envVarPid = " + envVarPid); + throw new RuntimeException("Error: wrong pid in creating env var"); + } + } + + + // --- Test 1b + if (!tr.contains("NativeMemoryTracking: got value " + NMT_Option_Value)) { + System.out.println(tr); + throw new RuntimeException("Error: Valid param failed to set env variable"); + } + + // --- Test 2 + tr = doExec(envMap,javaCmd, "-XX:NativeMemoryTracking=", + "-version"); + if (tr.contains("NativeMemoryTracking:")) { + System.out.println(tr); + throw new RuntimeException("Error: invalid param caused env variable to be erroneously created"); + } + if (!tr.contains("Syntax error, expecting -XX:NativeMemoryTracking=")) { + System.out.println(tr); + throw new RuntimeException("Error: invalid param not checked by JVM"); + } + + // --- Test 3 + tr = doExec(envMap,javaCmd, "-XX:NativeMemoryTracking", + "-version"); + if (tr.contains("NativeMemoryTracking:")) { + System.out.println(tr); + throw new RuntimeException("Error: invalid param caused env variable to be erroneously created"); + } + if (!tr.contains("Syntax error, expecting -XX:NativeMemoryTracking=")) { + System.out.println(tr); + throw new RuntimeException("Error: invalid param not checked by JVM"); + } + // --- Test 4 + tr = doExec(envMap,javaCmd, "-XX:NativeMemoryTracking=BADVALUE", + "-version"); + if (!tr.contains("expecting -XX:NativeMemoryTracking")) { + System.out.println(tr); + throw new RuntimeException("Error: invalid param did not get JVM Syntax error message"); + } + + } // NativeMemoryTracking + + // MacOSX specific tests ensue...... 
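Taken together, the four NativeMemoryTracking cases above assert that only a well-formed -XX:NativeMemoryTracking=<value> lets the launcher hand the value to the JVM (via an environment variable keyed by the launcher's pid, going by the tracking output the test parses), while empty, bare, or bogus values must fail with the JVM's syntax error and leave no such variable behind. A quick manual probe of the same flag handling, using only standard APIs:

import java.util.Arrays;

public class NmtFlagProbe {
    public static void main(String[] args) throws Exception {
        // "summary" should start normally; "" and "BADVALUE" should be
        // rejected with "Syntax error, expecting -XX:NativeMemoryTracking=..."
        for (String value : Arrays.asList("summary", "", "BADVALUE")) {
            Process p = new ProcessBuilder(
                    "java", "-XX:NativeMemoryTracking=" + value, "-version")
                    .inheritIO().start();
            System.out.println("-XX:NativeMemoryTracking=" + value
                    + " -> exit " + p.waitFor());
        }
    }
}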
if (!isMacOSX) return; --- ./langtools/.hgtags Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/.hgtags Wed Feb 04 12:14:46 2015 -0800 @@ -347,3 +347,29 @@ 99c3209f228e1f9aa874b6bd0908fd5d9ebf7078 jdk8u31-b10 e72be544fa9e247fba3c6bb61e291d80e127a461 jdk8u31-b11 c956b12b30ee21a4fc5df1871fa3b01e84310ebe jdk8u31-b12 +7a34ec7bb1c831e82ac88da578a028572b676260 jdk8u31-b13 +d231957fe3103e790465fcf058fb8cb33bbc4c4e jdk8u40-b00 +bf89a471779d13a9407f7d1c86f7716258bc4aa6 jdk8u40-b01 +0b6cc4ea670f5d17b56c088f202869bdbb80a5ce jdk8u40-b02 +5183e8b58a03206ca65b4b211be85b3740a70c39 jdk8u40-b03 +cde557bc48f5cd0c6b6aa70bdbc7563677379347 jdk8u40-b04 +a36fce70b505ec15be8353d40d417d331fcce740 jdk8u40-b05 +7c3d27120b92b6abbd2df910722405dfb02d4399 jdk8u40-b06 +2fa3858a281f9deae15bcc49224efd5b951b745d jdk8u40-b07 +d3515520e68e26c1012fca18eef190f8aff3a7a1 jdk8u40-b08 +8bb38a35072279618aa2cacd4fea74155a6dccf9 jdk8u40-b09 +69b84370397fbb5a66b99578242c47da7f8b3cb5 jdk8u40-b10 +d3c93dc64c5e1ffd610fb31362a78bedfd8097ba jdk8u40-b11 +e7560bceb36a933f5eb6ce8c33dce030ba0288f2 jdk8u40-b12 +88ce114c6adc387dc7fc5831b8263f152f0412fb jdk8u40-b13 +f18c5b47f27b387d94487890684abe5a554b0d9b jdk8u40-b14 +682a6c1aefd766eaf774ffeb1207a5189edf94d6 jdk8u40-b15 +74c51ff270c51d17732250411fe9cd5392bc925e jdk8u40-b16 +a12a9932f649dd3df174d3e340527433d3695c49 jdk8u40-b17 +94f30e5fde53e3ddcd3c4e9842349318eae8fe10 jdk8u40-b18 +0c514d1fd006fc79d35b670de10c370c8d559db7 jdk8u40-b19 +c3d6d1a5339952fbe4124e700407b7211446c99c jdk8u40-b20 +9113c7c8d902ec94b28ca0ef4a6466bdba65fcfc jdk8u40-b21 +79177246b3dbe5296fb53755d8695acdaef59fc8 jdk8u40-b22 +fb294b49373bda0b3afc7f011d64ecefed73b42e jdk8u40-b23 +c5d4ffa220f3824c2ea5d39dc99d41a9df9e5ae5 jdk8u40-b24 --- ./langtools/THIRD_PARTY_README Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/THIRD_PARTY_README Wed Feb 04 12:14:46 2015 -0800 @@ -3385,7 +3385,7 @@ included with JRE 8, JDK 8, and OpenJDK 8. Apache Commons Math 3.2 - Apache Derby 10.10.1.3 + Apache Derby 10.11.1.2 Apache Jakarta BCEL 5.1 Apache Jakarta Regexp 1.4 Apache Santuario XML Security for Java 1.5.4 --- ./langtools/src/share/classes/com/sun/source/doctree/package-info.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/source/doctree/package-info.java Wed Feb 04 12:14:46 2015 -0800 @@ -29,7 +29,7 @@ * * @author Jonathan Gibbons * @since 1.8 - * @see http://download.oracle.com/javase/6/docs/technotes/tools/solaris/javadoc.html#javadoctags + * @see https://docs.oracle.com/javase/6/docs/technotes/tools/solaris/javadoc.html#javadoctags */ @jdk.Exported package com.sun.source.doctree; --- ./langtools/src/share/classes/com/sun/tools/classfile/BootstrapMethods_attribute.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/classfile/BootstrapMethods_attribute.java Wed Feb 04 12:14:46 2015 -0800 @@ -29,7 +29,7 @@ /** * See JVMS 4.7.21 - * http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.21 + * https://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.21 * *
<p><b>
This is NOT part of any supported API. * If you write code that depends on this, you do so at your own risk. --- ./langtools/src/share/classes/com/sun/tools/javac/code/Source.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/code/Source.java Wed Feb 04 12:14:46 2015 -0800 @@ -233,6 +233,9 @@ public boolean allowFunctionalInterfaceMostSpecific() { return compareTo(JDK1_8) >= 0; } + public boolean allowPostApplicabilityVarargsAccessCheck() { + return compareTo(JDK1_8) >= 0; + } public static SourceVersion toSourceVersion(Source source) { switch(source) { case JDK1_2: --- ./langtools/src/share/classes/com/sun/tools/javac/code/Types.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/code/Types.java Wed Feb 04 12:14:46 2015 -0800 @@ -1305,7 +1305,8 @@ UndetVar undetvar = (UndetVar)t; WildcardType wt = (WildcardType)s.unannotatedType(); switch(wt.kind) { - case UNBOUND: //similar to ? extends Object + case UNBOUND: + break; case EXTENDS: { Type bound = wildUpperBound(s); undetvar.addBound(InferenceBound.UPPER, bound, this); @@ -1396,6 +1397,7 @@ else { // debugContainsType(t, s); return isSameWildcard(t, s) + || t.type == s || isCaptureOf(s, t) || ((t.isExtendsBound() || isSubtypeNoCapture(wildLowerBound(t), cvarLowerBound(wildLowerBound(s)))) && // TODO: JDK-8039214, cvarUpperBound call here is incorrect @@ -1890,7 +1892,12 @@ * Mapping to take element type of an arraytype */ private Mapping elemTypeFun = new Mapping ("elemTypeFun") { - public Type apply(Type t) { return elemtype(t); } + public Type apply(Type t) { + while (t.hasTag(TYPEVAR)) { + t = t.getUpperBound(); + } + return elemtype(t); + } }; /** @@ -2954,6 +2961,12 @@ } @Override + public Type visitUndetVar(UndetVar t, Void ignored) { + //do nothing - we should not replace inside undet variables + return t; + } + + @Override public Type visitClassType(ClassType t, Void ignored) { if (!t.isCompound()) { List typarams = t.getTypeArguments(); @@ -3513,40 +3526,46 @@ } /** - * Return the least upper bound of pair of types. if the lub does + * Return the least upper bound of list of types. if the lub does * not exist return null. */ - public Type lub(Type t1, Type t2) { - return lub(List.of(t1, t2)); + public Type lub(List ts) { + return lub(ts.toArray(new Type[ts.length()])); } /** * Return the least upper bound (lub) of set of types. If the lub * does not exist return the type of null (bottom). */ - public Type lub(List ts) { + public Type lub(Type... 
ts) { + final int UNKNOWN_BOUND = 0; final int ARRAY_BOUND = 1; final int CLASS_BOUND = 2; - int boundkind = 0; - for (Type t : ts) { + + int[] kinds = new int[ts.length]; + + int boundkind = UNKNOWN_BOUND; + for (int i = 0 ; i < ts.length ; i++) { + Type t = ts[i]; switch (t.getTag()) { case CLASS: - boundkind |= CLASS_BOUND; + boundkind |= kinds[i] = CLASS_BOUND; break; case ARRAY: - boundkind |= ARRAY_BOUND; + boundkind |= kinds[i] = ARRAY_BOUND; break; case TYPEVAR: do { t = t.getUpperBound(); } while (t.hasTag(TYPEVAR)); if (t.hasTag(ARRAY)) { - boundkind |= ARRAY_BOUND; + boundkind |= kinds[i] = ARRAY_BOUND; } else { - boundkind |= CLASS_BOUND; + boundkind |= kinds[i] = CLASS_BOUND; } break; default: + kinds[i] = UNKNOWN_BOUND; if (t.isPrimitive()) return syms.errType; } @@ -3557,15 +3576,16 @@ case ARRAY_BOUND: // calculate lub(A[], B[]) - List elements = Type.map(ts, elemTypeFun); - for (Type t : elements) { - if (t.isPrimitive()) { + Type[] elements = new Type[ts.length]; + for (int i = 0 ; i < ts.length ; i++) { + Type elem = elements[i] = elemTypeFun.apply(ts[i]); + if (elem.isPrimitive()) { // if a primitive type is found, then return // arraySuperType unless all the types are the // same - Type first = ts.head; - for (Type s : ts.tail) { - if (!isSameType(first, s)) { + Type first = ts[0]; + for (int j = 1 ; j < ts.length ; j++) { + if (!isSameType(first, ts[j])) { // lub(int[], B[]) is Cloneable & Serializable return arraySuperType(); } @@ -3580,13 +3600,20 @@ case CLASS_BOUND: // calculate lub(A, B) - while (!ts.head.hasTag(CLASS) && !ts.head.hasTag(TYPEVAR)) { - ts = ts.tail; + int startIdx = 0; + for (int i = 0; i < ts.length ; i++) { + Type t = ts[i]; + if (t.hasTag(CLASS) || t.hasTag(TYPEVAR)) { + break; + } else { + startIdx++; + } } - Assert.check(!ts.isEmpty()); + Assert.check(startIdx < ts.length); //step 1 - compute erased candidate set (EC) - List cl = erasedSupertypes(ts.head); - for (Type t : ts.tail) { + List cl = erasedSupertypes(ts[startIdx]); + for (int i = startIdx + 1 ; i < ts.length ; i++) { + Type t = ts[i]; if (t.hasTag(CLASS) || t.hasTag(TYPEVAR)) cl = intersect(cl, erasedSupertypes(t)); } @@ -3595,9 +3622,10 @@ //step 3 - for each element G in MEC, compute lci(Inv(G)) List candidates = List.nil(); for (Type erasedSupertype : mec) { - List lci = List.of(asSuper(ts.head, erasedSupertype.tsym)); - for (Type t : ts) { - lci = intersect(lci, List.of(asSuper(t, erasedSupertype.tsym))); + List lci = List.of(asSuper(ts[startIdx], erasedSupertype.tsym)); + for (int i = startIdx + 1 ; i < ts.length ; i++) { + Type superType = asSuper(ts[i], erasedSupertype.tsym); + lci = intersect(lci, superType != null ? 
List.of(superType) : List.nil()); } candidates = candidates.appendList(lci); } @@ -3608,9 +3636,9 @@ default: // calculate lub(A, B[]) List classes = List.of(arraySuperType()); - for (Type t : ts) { - if (!t.hasTag(ARRAY)) // Filter out any arrays - classes = classes.prepend(t); + for (int i = 0 ; i < ts.length ; i++) { + if (kinds[i] != ARRAY_BOUND) // Filter out any arrays + classes = classes.prepend(ts[i]); } // lub(A, B[]) is lub(A, arraySuperType) return lub(classes); --- ./langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java Wed Feb 04 12:14:46 2015 -0800 @@ -252,36 +252,30 @@ */ Type check(final JCTree tree, final Type found, final int ownkind, final ResultInfo resultInfo) { InferenceContext inferenceContext = resultInfo.checkContext.inferenceContext(); - Type owntype = found; - if (!owntype.hasTag(ERROR) && !resultInfo.pt.hasTag(METHOD) && !resultInfo.pt.hasTag(FORALL)) { - if (allowPoly && inferenceContext.free(found)) { - if ((ownkind & ~resultInfo.pkind) == 0) { - owntype = resultInfo.check(tree, inferenceContext.asUndetVar(owntype)); - } else { - log.error(tree.pos(), "unexpected.type", - kindNames(resultInfo.pkind), - kindName(ownkind)); - owntype = types.createErrorType(owntype); - } + Type owntype; + if (!found.hasTag(ERROR) && !resultInfo.pt.hasTag(METHOD) && !resultInfo.pt.hasTag(FORALL)) { + if ((ownkind & ~resultInfo.pkind) != 0) { + log.error(tree.pos(), "unexpected.type", + kindNames(resultInfo.pkind), + kindName(ownkind)); + owntype = types.createErrorType(found); + } else if (allowPoly && inferenceContext.free(found)) { + //delay the check if there are inference variables in the found type + //this means we are dealing with a partially inferred poly expression + owntype = resultInfo.pt; inferenceContext.addFreeTypeListener(List.of(found, resultInfo.pt), new FreeTypeListener() { @Override public void typesInferred(InferenceContext inferenceContext) { ResultInfo pendingResult = - resultInfo.dup(inferenceContext.asInstType(resultInfo.pt)); + resultInfo.dup(inferenceContext.asInstType(resultInfo.pt)); check(tree, inferenceContext.asInstType(found), ownkind, pendingResult); } }); - return tree.type = resultInfo.pt; } else { - if ((ownkind & ~resultInfo.pkind) == 0) { - owntype = resultInfo.check(tree, owntype); - } else { - log.error(tree.pos(), "unexpected.type", - kindNames(resultInfo.pkind), - kindName(ownkind)); - owntype = types.createErrorType(owntype); - } + owntype = resultInfo.check(tree, found); } + } else { + owntype = found; } tree.type = owntype; return owntype; @@ -293,7 +287,7 @@ * @param env The current environment. */ boolean isAssignableAsBlankFinal(VarSymbol v, Env env) { - Symbol owner = owner(env); + Symbol owner = env.info.scope.owner; // owner refers to the innermost variable, method or // initializer block declaration at this point. 
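The reworked Attr.check above defers judgment on a partially inferred poly expression: when the found type still mentions free inference variables, the check is parked on a FreeTypeListener and re-run with instantiated types once inference finishes. Outside javac, the shape is an ordinary completion callback; a toy rendering with entirely hypothetical names (nothing here is javac API):

import java.util.ArrayList;
import java.util.List;

public class DeferredCheckSketch {
    interface FreeTypeListener { void typesInferred(); }

    static final List<FreeTypeListener> pending = new ArrayList<>();

    static void check(String tree, boolean hasFreeVariables) {
        if (hasFreeVariables) {
            // Cannot judge the tree yet: re-run this check after inference
            // has instantiated the free variables.
            pending.add(() -> check(tree, false));
        } else {
            System.out.println("checked: " + tree);
        }
    }

    public static void main(String[] args) {
        check("m(x -> x.foo())", true);                   // parked
        pending.forEach(FreeTypeListener::typesInferred); // inference done: runs now
    }
}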
return @@ -308,41 +302,6 @@ ((v.flags() & STATIC) != 0) == Resolve.isStatic(env)); } - /** - * Return the innermost enclosing owner symbol in a given attribution context - */ - Symbol owner(Env env) { - while (true) { - switch (env.tree.getTag()) { - case VARDEF: - //a field can be owner - VarSymbol vsym = ((JCVariableDecl)env.tree).sym; - if (vsym.owner.kind == TYP) { - return vsym; - } - break; - case METHODDEF: - //method def is always an owner - return ((JCMethodDecl)env.tree).sym; - case CLASSDEF: - //class def is always an owner - return ((JCClassDecl)env.tree).sym; - case BLOCK: - //static/instance init blocks are owner - Symbol blockSym = env.info.scope.owner; - if ((blockSym.flags() & BLOCK) != 0) { - return blockSym; - } - break; - case TOPLEVEL: - //toplevel is always an owner (for pkge decls) - return env.info.scope.owner; - } - Assert.checkNonNull(env.next); - env = env.next; - } - } - /** Check that variable can be assigned to. * @param pos The current source code position. * @param v The assigned varaible @@ -1051,8 +1010,12 @@ // parameters have already been entered env.info.scope.enter(tree.sym); } else { - memberEnter.memberEnter(tree, env); - annotate.flush(); + try { + annotate.enterStart(); + memberEnter.memberEnter(tree, env); + } finally { + annotate.enterDone(); + } } } else { if (tree.init != null) { @@ -2335,6 +2298,7 @@ currentTarget = infer.instantiateFunctionalInterface(that, currentTarget, explicitParamTypes, resultInfo.checkContext); } + currentTarget = types.removeWildcards(currentTarget); lambdaType = types.findDescriptorType(currentTarget); } else { currentTarget = Type.recoveryType; @@ -2727,7 +2691,7 @@ resultInfo.checkContext.deferredAttrContext().mode == DeferredAttr.AttrMode.CHECK && isSerializable(currentTarget); if (currentTarget != Type.recoveryType) { - currentTarget = targetChecker.visit(currentTarget, that); + currentTarget = types.removeWildcards(targetChecker.visit(currentTarget, that)); desc = types.findDescriptorType(currentTarget); } else { currentTarget = Type.recoveryType; @@ -3262,8 +3226,9 @@ elt = ((ArrayType)elt.unannotatedType()).elemtype; if (elt.hasTag(TYPEVAR)) { log.error(tree.pos(), "type.var.cant.be.deref"); - result = types.createErrorType(tree.type); - return; + result = tree.type = types.createErrorType(tree.name, site.tsym, site); + tree.sym = tree.type.tsym; + return ; } } @@ -3279,6 +3244,10 @@ // Determine the symbol represented by the selection. env.info.pendingResolutionPhase = null; Symbol sym = selectSym(tree, sitesym, site, env, resultInfo); + if (sym.kind == VAR && sym.name != names._super && env.info.defaultSuperCallSite != null) { + log.error(tree.selected.pos(), "not.encl.class", site.tsym); + sym = syms.errSymbol; + } if (sym.exists() && !isType(sym) && (pkind() & (PCK | TYP)) != 0) { site = capture(site); sym = selectSym(tree, sitesym, site, env, resultInfo); @@ -3665,7 +3634,7 @@ // and are subject to definite assignment checking. if ((env.info.enclVar == v || v.pos > tree.pos) && v.owner.kind == TYP && - canOwnInitializer(owner(env)) && + enclosingInitEnv(env) != null && v.owner == env.info.scope.owner.enclClass() && ((v.flags() & STATIC) != 0) == Resolve.isStatic(env) && (!env.tree.hasTag(ASSIGN) || @@ -3685,6 +3654,36 @@ } /** + * Returns the enclosing init environment associated with this env (if any). An init env + * can be either a field declaration env or a static/instance initializer env. 
+ */ + Env enclosingInitEnv(Env env) { + while (true) { + switch (env.tree.getTag()) { + case VARDEF: + JCVariableDecl vdecl = (JCVariableDecl)env.tree; + if (vdecl.sym.owner.kind == TYP) { + //field + return env; + } + break; + case BLOCK: + if (env.next.tree.hasTag(CLASSDEF)) { + //instance/static initializer + return env; + } + break; + case METHODDEF: + case CLASSDEF: + case TOPLEVEL: + return null; + } + Assert.checkNonNull(env.next); + env = env.next; + } + } + + /** * Check for illegal references to static members of enum. In * an enum type, constructors and initializers may not * reference its static members unless they are constant. @@ -3737,17 +3736,6 @@ v.name != names._class; } - /** Can the given symbol be the owner of code which forms part - * if class initialization? This is the case if the symbol is - * a type or field, or if the symbol is the synthetic method. - * owning a block. - */ - private boolean canOwnInitializer(Symbol sym) { - return - (sym.kind & (VAR | TYP)) != 0 || - (sym.kind == MTH && (sym.flags() & BLOCK) != 0); - } - Warner noteWarner = new Warner(); /** @@ -4516,14 +4504,15 @@ super.visitTypeTest(tree); } public void visitNewClass(JCNewClass tree) { - if (tree.clazz.hasTag(ANNOTATED_TYPE)) { - checkForDeclarationAnnotations(((JCAnnotatedType) tree.clazz).annotations, - tree.clazz.type.tsym); - } - if (tree.def != null) { - checkForDeclarationAnnotations(tree.def.mods.annotations, tree.clazz.type.tsym); - } - if (tree.clazz.type != null) { + if (tree.clazz != null && tree.clazz.type != null) { + if (tree.clazz.hasTag(ANNOTATED_TYPE)) { + checkForDeclarationAnnotations(((JCAnnotatedType) tree.clazz).annotations, + tree.clazz.type.tsym); + } + if (tree.def != null) { + checkForDeclarationAnnotations(tree.def.mods.annotations, tree.clazz.type.tsym); + } + validateAnnotatedType(tree.clazz, tree.clazz.type); } super.visitNewClass(tree); --- ./langtools/src/share/classes/com/sun/tools/javac/comp/Check.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/Check.java Wed Feb 04 12:14:46 2015 -0800 @@ -531,8 +531,8 @@ Type checkType(final DiagnosticPosition pos, final Type found, final Type req, final CheckContext checkContext) { final Infer.InferenceContext inferenceContext = checkContext.inferenceContext(); - if (inferenceContext.free(req)) { - inferenceContext.addFreeTypeListener(List.of(req), new FreeTypeListener() { + if (inferenceContext.free(req) || inferenceContext.free(found)) { + inferenceContext.addFreeTypeListener(List.of(req, found), new FreeTypeListener() { @Override public void typesInferred(InferenceContext inferenceContext) { checkType(pos, inferenceContext.asInstType(found), inferenceContext.asInstType(req), checkContext); @@ -1715,7 +1715,12 @@ // Warn if a deprecated method overridden by a non-deprecated one. 
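Taken together, the Attr.java changes above replace symbol-based queries (the removed owner() and canOwnInitializer() helpers) with environment-based ones: isAssignableAsBlankFinal() now reads env.info.scope.owner directly, and enclosingInitEnv() walks the env chain to the enclosing field-initializer or init-block env, if any. A small, hypothetical sketch of the source-level rules this attribution enforces (names are illustrative):

    class InitRules {
        final int x;                  // blank final, no initializer
        int a = 1;
        // int early = b;             // error: illegal forward reference
        int late = this.b + 1;        // ok: qualified access, not a simple name

        {
            x = a;                    // ok: an init block may assign a blank final
        }

        int b = 2;

        void m() {
            // x = 3;                 // error: x is not assignable outside init
        }
    }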
if (!isDeprecatedOverrideIgnorable(other, origin)) { - checkDeprecated(TreeInfo.diagnosticPositionFor(m, tree), m, other); + Lint prevLint = setLint(lint.augment(m)); + try { + checkDeprecated(TreeInfo.diagnosticPositionFor(m, tree), m, other); + } finally { + setLint(prevLint); + } } } // where --- ./langtools/src/share/classes/com/sun/tools/javac/comp/DeferredAttr.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/DeferredAttr.java Wed Feb 04 12:14:46 2015 -0800 @@ -497,13 +497,11 @@ } } if (!progress) { - DeferredAttrContext dac = this; - while (dac != emptyDeferredAttrContext) { - if (dac.mode == AttrMode.SPECULATIVE) { - //unsticking does not take place during overload - break; + if (insideOverloadPhase()) { + for (DeferredAttrNode deferredNode: deferredAttrNodes) { + deferredNode.dt.tree.type = Type.noType; } - dac = dac.parent; + return; } //remove all variables that have already been instantiated //from the list of stuck variables @@ -519,6 +517,17 @@ } } } + + private boolean insideOverloadPhase() { + DeferredAttrContext dac = this; + if (dac == emptyDeferredAttrContext) { + return false; + } + if (dac.mode == AttrMode.SPECULATIVE) { + return true; + } + return dac.parent.insideOverloadPhase(); + } } /** @@ -579,6 +588,8 @@ return false; } } else { + Assert.check(!deferredAttrContext.insideOverloadPhase(), + "attribution shouldn't be happening here"); ResultInfo instResultInfo = resultInfo.dup(deferredAttrContext.inferenceContext.asInstType(resultInfo.pt)); dt.check(instResultInfo, dummyStuckPolicy, basicCompleter); @@ -1314,6 +1325,12 @@ site = env.enclClass.sym.type; } + while (site.hasTag(TYPEVAR)) { + site = site.getUpperBound(); + } + + site = types.capture(site); + List args = rs.dummyArgs(tree.args.length()); Name name = TreeInfo.name(tree.meth); @@ -1337,7 +1354,9 @@ @Override public Symbol process(MethodSymbol ms) { ArgumentExpressionKind kind = ArgumentExpressionKind.methodKind(ms, types); - return kind != ArgumentExpressionKind.POLY ? 
ms.getReturnType().tsym : null; + if (kind == ArgumentExpressionKind.POLY || ms.getReturnType().hasTag(TYPEVAR)) + return null; + return ms.getReturnType().tsym; } @Override public Symbol reduce(Symbol s1, Symbol s2) { --- ./langtools/src/share/classes/com/sun/tools/javac/comp/Flow.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/Flow.java Wed Feb 04 12:14:46 2015 -0800 @@ -208,7 +208,7 @@ public void analyzeTree(Env env, TreeMaker make) { new AliveAnalyzer().analyzeTree(env, make); - new AssignAnalyzer(log, syms, lint, names, enforceThisDotInit).analyzeTree(env); + new AssignAnalyzer().analyzeTree(env); new FlowAnalyzer().analyzeTree(env, make); new CaptureAnalyzer().analyzeTree(env, make); } @@ -241,13 +241,19 @@ //related errors, which will allow for more errors to be detected Log.DiagnosticHandler diagHandler = new Log.DiscardDiagnosticHandler(log); try { - new AssignAnalyzer(log, syms, lint, names, enforceThisDotInit) { + new AssignAnalyzer() { + Scope enclosedSymbols = new Scope(env.enclClass.sym); + @Override + public void visitVarDef(JCVariableDecl tree) { + enclosedSymbols.enter(tree.sym); + super.visitVarDef(tree); + } @Override protected boolean trackable(VarSymbol sym) { - return !env.info.scope.includes(sym) && + return enclosedSymbols.includes(sym) && sym.owner.kind == MTH; } - }.analyzeTree(env); + }.analyzeTree(env, that); LambdaFlowAnalyzer flowAnalyzer = new LambdaFlowAnalyzer(); flowAnalyzer.analyzeTree(env, that, make); return flowAnalyzer.inferredThrownTypes; @@ -1373,12 +1379,12 @@ * effectively-final local variables/parameters. */ - public abstract static class AbstractAssignAnalyzer
<P extends AbstractAssignAnalyzer.AbstractAssignPendingExit> + public abstract class AbstractAssignAnalyzer<P extends AbstractAssignAnalyzer<P>.AbstractAssignPendingExit> extends BaseAnalyzer<P>
{ /** The set of definitely assigned variables. */ - protected final Bits inits; + protected Bits inits; /** The set of definitely unassigned variables. */ @@ -1432,13 +1438,7 @@ /** The starting position of the analysed tree */ int startPos; - final Symtab syms; - - protected Names names; - - final boolean enforceThisDotInit; - - public static class AbstractAssignPendingExit extends BaseAnalyzer.PendingExit { + public class AbstractAssignPendingExit extends BaseAnalyzer.PendingExit { final Bits inits; final Bits uninits; @@ -1460,17 +1460,14 @@ } } - public AbstractAssignAnalyzer(Bits inits, Symtab syms, Names names, boolean enforceThisDotInit) { - this.inits = inits; + public AbstractAssignAnalyzer() { + this.inits = new Bits(); uninits = new Bits(); uninitsTry = new Bits(); initsWhenTrue = new Bits(true); initsWhenFalse = new Bits(true); uninitsWhenTrue = new Bits(true); uninitsWhenFalse = new Bits(true); - this.syms = syms; - this.names = names; - this.enforceThisDotInit = enforceThisDotInit; } private boolean isInitialConstructor = false; @@ -2439,26 +2436,15 @@ } } - public static class AssignAnalyzer - extends AbstractAssignAnalyzer { + public class AssignAnalyzer extends AbstractAssignAnalyzer { - Log log; - Lint lint; - - public static class AssignPendingExit - extends AbstractAssignAnalyzer.AbstractAssignPendingExit { + public class AssignPendingExit extends AbstractAssignAnalyzer.AbstractAssignPendingExit { public AssignPendingExit(JCTree tree, final Bits inits, final Bits uninits) { super(tree, inits, uninits); } } - public AssignAnalyzer(Log log, Symtab syms, Lint lint, Names names, boolean enforceThisDotInit) { - super(new Bits(), syms, names, enforceThisDotInit); - this.log = log; - this.lint = lint; - } - @Override protected AssignPendingExit createNewPendingExit(JCTree tree, Bits inits, Bits uninits) { --- ./langtools/src/share/classes/com/sun/tools/javac/comp/Infer.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/Infer.java Wed Feb 04 12:14:46 2015 -0800 @@ -353,6 +353,7 @@ Type to, Attr.ResultInfo resultInfo, InferenceContext inferenceContext) { inferenceContext.solve(List.of(from.qtype), new Warner()); + inferenceContext.notifyChange(); Type capturedType = resultInfo.checkContext.inferenceContext() .cachedCapture(tree, from.inst, false); if (types.isConvertible(capturedType, @@ -449,7 +450,7 @@ class ImplicitArgType extends DeferredAttr.DeferredTypeMap { public ImplicitArgType(Symbol msym, Resolve.MethodResolutionPhase phase) { - rs.deferredAttr.super(AttrMode.SPECULATIVE, msym, phase); + (rs.deferredAttr).super(AttrMode.SPECULATIVE, msym, phase); } public Type apply(Type t) { @@ -517,6 +518,8 @@ //or if it's not a subtype of the original target, issue an error checkContext.report(pos, diags.fragment("no.suitable.functional.intf.inst", funcInterface)); } + //propagate constraints as per JLS 18.2.1 + checkContext.compatible(owntype, funcInterface, types.noWarnings); return owntype; } } @@ -781,7 +784,10 @@ while (tmpTail.nonEmpty()) { Type b1 = boundList.head; Type b2 = tmpTail.head; - if (b1 != b2) { + /* This wildcard check is temporary workaround. This code may need to be + * revisited once spec bug JDK-7034922 is fixed. 
+ */ + if (b1 != b2 && !b1.hasTag(WILDCARD) && !b2.hasTag(WILDCARD)) { Pair commonSupers = infer.getParameterizedSupers(b1, b2); if (commonSupers != null) { List allParamsSuperBound1 = commonSupers.fst.allparams(); --- ./langtools/src/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java Wed Feb 04 12:14:46 2015 -0800 @@ -41,6 +41,7 @@ import com.sun.tools.javac.code.Symtab; import com.sun.tools.javac.code.Type; import com.sun.tools.javac.code.Type.MethodType; +import com.sun.tools.javac.code.Type.TypeVar; import com.sun.tools.javac.code.Types; import com.sun.tools.javac.comp.LambdaToMethod.LambdaAnalyzerPreprocessor.*; import com.sun.tools.javac.comp.Lower.BasicFreeVarCollector; @@ -61,6 +62,7 @@ import static com.sun.tools.javac.code.Kinds.*; import static com.sun.tools.javac.code.TypeTag.*; import static com.sun.tools.javac.tree.JCTree.Tag.*; +import javax.lang.model.type.TypeKind; /** * This pass desugars lambda expressions into static methods @@ -321,7 +323,9 @@ ListBuffer syntheticInits = new ListBuffer<>(); - if (!sym.isStatic()) { + if (localContext.methodReferenceReceiver != null) { + syntheticInits.append(localContext.methodReferenceReceiver); + } else if (!sym.isStatic()) { syntheticInits.append(makeThis( sym.owner.enclClass().asType(), localContext.owner.enclClass())); @@ -364,17 +368,10 @@ //first determine the method symbol to be used to generate the sam instance //this is either the method reference symbol, or the bridged reference symbol - Symbol refSym = localContext.needsBridge() - ? localContext.bridgeSym - : localContext.isSignaturePolymorphic() + Symbol refSym = localContext.isSignaturePolymorphic() ? localContext.sigPolySym : tree.sym; - //build the bridge method, if needed - if (localContext.needsBridge()) { - bridgeMemberReference(tree, localContext); - } - //the qualifying expression is treated as a special captured arg JCExpression init; switch(tree.kind) { @@ -744,126 +741,146 @@ // /** - * Generate an adapter method "bridge" for a method reference which cannot - * be used directly. + * Converts a method reference which cannot be used directly into a lambda */ - private class MemberReferenceBridger { + private class MemberReferenceToLambda { private final JCMemberReference tree; private final ReferenceTranslationContext localContext; + private final Symbol owner; private final ListBuffer args = new ListBuffer<>(); private final ListBuffer params = new ListBuffer<>(); - MemberReferenceBridger(JCMemberReference tree, ReferenceTranslationContext localContext) { + private JCExpression receiverExpression = null; + + MemberReferenceToLambda(JCMemberReference tree, ReferenceTranslationContext localContext, Symbol owner) { this.tree = tree; this.localContext = localContext; + this.owner = owner; } - /** - * Generate the bridge - */ - JCMethodDecl bridge() { + JCLambda lambda() { int prevPos = make.pos; try { make.at(tree); - Type samDesc = localContext.bridgedRefSig(); - List samPTypes = samDesc.getParameterTypes(); - //an extra argument is prepended to the signature of the bridge in case - //the member reference is an instance method reference (in which case - //the receiver expression is passed to the bridge itself). 
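The MemberReferenceToLambda rewrite above drops the synthetic bridge method entirely: a method reference that cannot be linked directly is now turned into an equivalent lambda (with any bound receiver captured through receiverExpression) and handed to the ordinary lambda translation pipeline. A hypothetical example of a reference that takes this path per the needsConversionToLambda() predicate further below; the commented desugaring is only a sketch:

    import java.util.function.Supplier;

    class Outer {
        class Inner { }

        Supplier<Inner> make() {
            // Inner is an inner class, so the constructor reference needs
            // the enclosing instance; javac now rewrites it roughly as
            // '() -> new Inner()' instead of emitting a named bridge method.
            return Inner::new;
        }
    }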
- Type recType = null; - switch (tree.kind) { - case IMPLICIT_INNER: - recType = tree.sym.owner.type.getEnclosingType(); - break; - case BOUND: - recType = tree.getQualifierExpression().type; - break; - case UNBOUND: - recType = samPTypes.head; - samPTypes = samPTypes.tail; - break; - } + //body generation - this can be either a method call or a + //new instance creation expression, depending on the member reference kind + VarSymbol rcvr = addParametersReturnReceiver(); + JCExpression expr = (tree.getMode() == ReferenceMode.INVOKE) + ? expressionInvoke(rcvr) + : expressionNew(); - //generate the parameter list for the bridged member reference - the - //bridge signature will match the signature of the target sam descriptor - - VarSymbol rcvr = (recType == null) - ? null - : addParameter("rec$", recType, false); - - List refPTypes = tree.sym.type.getParameterTypes(); - int refSize = refPTypes.size(); - int samSize = samPTypes.size(); - // Last parameter to copy from referenced method - int last = localContext.needsVarArgsConversion() ? refSize - 1 : refSize; - - List l = refPTypes; - // Use parameter types of the referenced method, excluding final var args - for (int i = 0; l.nonEmpty() && i < last; ++i) { - addParameter("x$" + i, l.head, true); - l = l.tail; - } - // Flatten out the var args - for (int i = last; i < samSize; ++i) { - addParameter("xva$" + i, tree.varargsElement, true); - } - - //generate the bridge method declaration - JCMethodDecl bridgeDecl = make.MethodDef(make.Modifiers(localContext.bridgeSym.flags()), - localContext.bridgeSym.name, - make.QualIdent(samDesc.getReturnType().tsym), - List.nil(), - params.toList(), - tree.sym.type.getThrownTypes() == null - ? List.nil() - : make.Types(tree.sym.type.getThrownTypes()), - null, - null); - bridgeDecl.sym = (MethodSymbol) localContext.bridgeSym; - bridgeDecl.type = localContext.bridgeSym.type = - types.createMethodTypeWithParameters(samDesc, TreeInfo.types(params.toList())); - - //bridge method body generation - this can be either a method call or a - //new instance creation expression, depending on the member reference kind - JCExpression bridgeExpr = (tree.getMode() == ReferenceMode.INVOKE) - ? bridgeExpressionInvoke(makeReceiver(rcvr)) - : bridgeExpressionNew(); - - //the body is either a return expression containing a method call, - //or the method call itself, depending on whether the return type of - //the bridge is non-void/void. - bridgeDecl.body = makeLambdaExpressionBody(bridgeExpr, bridgeDecl); - - return bridgeDecl; + JCLambda slam = make.Lambda(params.toList(), expr); + slam.targets = tree.targets; + slam.type = tree.type; + slam.pos = tree.pos; + return slam; } finally { make.at(prevPos); } } - //where - private JCExpression makeReceiver(VarSymbol rcvr) { - if (rcvr == null) return null; - JCExpression rcvrExpr = make.Ident(rcvr); - Type rcvrType = tree.sym.enclClass().type; - if (!rcvr.type.tsym.isSubClass(rcvrType.tsym, types)) { - rcvrExpr = make.TypeCast(make.Type(rcvrType), rcvrExpr).setType(rcvrType); + + /** + * Generate the parameter list for the converted member reference. 
+ * + * @return The receiver variable symbol, if any + */ + VarSymbol addParametersReturnReceiver() { + Type samDesc = localContext.bridgedRefSig(); + List samPTypes = samDesc.getParameterTypes(); + List descPTypes = tree.getDescriptorType(types).getParameterTypes(); + + // Determine the receiver, if any + VarSymbol rcvr; + switch (tree.kind) { + case BOUND: + // The receiver is explicit in the method reference + rcvr = addParameter("rec$", tree.getQualifierExpression().type, false); + receiverExpression = attr.makeNullCheck(tree.getQualifierExpression()); + break; + case UNBOUND: + // The receiver is the first parameter, extract it and + // adjust the SAM and unerased type lists accordingly + rcvr = addParameter("rec$", samDesc.getParameterTypes().head, false); + samPTypes = samPTypes.tail; + descPTypes = descPTypes.tail; + break; + default: + rcvr = null; + break; + } + List implPTypes = tree.sym.type.getParameterTypes(); + int implSize = implPTypes.size(); + int samSize = samPTypes.size(); + // Last parameter to copy from referenced method, exclude final var args + int last = localContext.needsVarArgsConversion() ? implSize - 1 : implSize; + + // Failsafe -- assure match-up + boolean checkForIntersection = tree.varargsElement != null || implSize == descPTypes.size(); + + // Use parameter types of the implementation method unless the unerased + // SAM parameter type is an intersection type, in that case use the + // erased SAM parameter type so that the supertype relationship + // the implementation method parameters is not obscured. + // Note: in this loop, the lists implPTypes, samPTypes, and descPTypes + // are used as pointers to the current parameter type information + // and are thus not usable afterwards. + for (int i = 0; implPTypes.nonEmpty() && i < last; ++i) { + // By default use the implementation method parmeter type + Type parmType = implPTypes.head; + // If the unerased parameter type is a type variable whose + // bound is an intersection (eg. 
<T extends A & B>
) then + // use the SAM parameter type + if (checkForIntersection && descPTypes.head.getKind() == TypeKind.TYPEVAR) { + TypeVar tv = (TypeVar) descPTypes.head; + if (tv.bound.getKind() == TypeKind.INTERSECTION) { + parmType = samPTypes.head; + } } - return rcvrExpr; + addParameter("x$" + i, parmType, true); + + // Advance to the next parameter + implPTypes = implPTypes.tail; + samPTypes = samPTypes.tail; + descPTypes = descPTypes.tail; + } + // Flatten out the var args + for (int i = last; i < samSize; ++i) { + addParameter("xva$" + i, tree.varargsElement, true); } + return rcvr; + } + + JCExpression getReceiverExpression() { + return receiverExpression; + } + + private JCExpression makeReceiver(VarSymbol rcvr) { + if (rcvr == null) return null; + JCExpression rcvrExpr = make.Ident(rcvr); + Type rcvrType = tree.sym.enclClass().type; + if (rcvrType == syms.arrayClass.type) { + // Map the receiver type to the actually type, not just "array" + rcvrType = tree.getQualifierExpression().type; + } + if (!rcvr.type.tsym.isSubClass(rcvrType.tsym, types)) { + rcvrExpr = make.TypeCast(make.Type(rcvrType), rcvrExpr).setType(rcvrType); + } + return rcvrExpr; + } + /** - * determine the receiver of the bridged method call - the receiver can - * be either the synthetic receiver parameter or a type qualifier; the - * original qualifier expression is never used here, as it might refer - * to symbols not available in the static context of the bridge + * determine the receiver of the method call - the receiver can + * be a type qualifier, the synthetic receiver parameter or 'super'. */ - private JCExpression bridgeExpressionInvoke(JCExpression rcvr) { + private JCExpression expressionInvoke(VarSymbol rcvr) { JCExpression qualifier = tree.sym.isStatic() ? make.Type(tree.sym.owner.type) : (rcvr != null) ? - rcvr : + makeReceiver(rcvr) : tree.getQualifierExpression(); //create the qualifier expression @@ -882,10 +899,9 @@ } /** - * the enclosing expression is either 'null' (no enclosing type) or set - * to the first bridge synthetic parameter + * Lambda body to use for a 'new'. 
*/ - private JCExpression bridgeExpressionNew() { + private JCExpression expressionNew() { if (tree.kind == ReferenceKind.ARRAY_CTOR) { //create the array creation expression JCNewArray newArr = make.NewArray( @@ -895,15 +911,10 @@ newArr.type = tree.getQualifierExpression().type; return newArr; } else { - JCExpression encl = null; - switch (tree.kind) { - case UNBOUND: - case IMPLICIT_INNER: - encl = make.Ident(params.first()); - } - //create the instance creation expression - JCNewClass newClass = make.NewClass(encl, + //note that method reference syntax does not allow an explicit + //enclosing class (so the enclosing class is null) + JCNewClass newClass = make.NewClass(null, List.nil(), make.Type(tree.getQualifierExpression().type), convertArgs(tree.sym, args.toList(), tree.varargsElement), @@ -917,7 +928,8 @@ } private VarSymbol addParameter(String name, Type p, boolean genArg) { - VarSymbol vsym = new VarSymbol(0, names.fromString(name), p, localContext.bridgeSym); + VarSymbol vsym = new VarSymbol(PARAMETER | SYNTHETIC, names.fromString(name), p, owner); + vsym.pos = tree.pos; params.append(make.VarDef(vsym, null)); if (genArg) { args.append(make.Ident(vsym)); @@ -926,15 +938,6 @@ } } - /** - * Bridges a member reference - this is needed when: - * * Var args in the referenced method need to be flattened away - * * super is used - */ - private void bridgeMemberReference(JCMemberReference tree, ReferenceTranslationContext localContext) { - kInfo.addMethod(new MemberReferenceBridger(tree, localContext).bridge()); - } - private MethodType typeToMethodType(Type mt) { Type type = types.erasure(mt); return new MethodType(type.getParameterTypes(), @@ -1254,9 +1257,25 @@ @Override public void visitLambda(JCLambda tree) { + analyzeLambda(tree, "lambda.stat"); + } + + private void analyzeLambda(JCLambda tree, JCExpression methodReferenceReceiver) { + // Translation of the receiver expression must occur first + JCExpression rcvr = translate(methodReferenceReceiver); + LambdaTranslationContext context = analyzeLambda(tree, "mref.stat.1"); + if (rcvr != null) { + context.methodReferenceReceiver = rcvr; + } + } + + private LambdaTranslationContext analyzeLambda(JCLambda tree, String statKey) { List prevStack = frameStack; try { - LambdaTranslationContext context = (LambdaTranslationContext)makeLambdaContext(tree); + LambdaTranslationContext context = new LambdaTranslationContext(tree); + if (dumpLambdaToMethodStats) { + log.note(tree, statKey, context.needsAltMetafactory(), context.translatedSym); + } frameStack = frameStack.prepend(new Frame(tree)); for (JCVariableDecl param : tree.params) { context.addSymbol(param.sym, PARAM); @@ -1265,6 +1284,7 @@ contextMap.put(tree, context); super.visitLambda(tree); context.complete(); + return context; } finally { frameStack = prevStack; @@ -1353,47 +1373,24 @@ * information added in the LambdaToMethod pass will have the wrong * signature. Hooks between Lower and LambdaToMethod have been added to * handle normal "new" in this case. This visitor converts potentially - * effected method references into a lambda containing a normal "new" of - * the class. + * affected method references into a lambda containing a normal + * expression. 
* * @param tree */ @Override public void visitReference(JCMemberReference tree) { - if (tree.getMode() == ReferenceMode.NEW - && tree.kind != ReferenceKind.ARRAY_CTOR - && tree.sym.owner.isLocal()) { - MethodSymbol consSym = (MethodSymbol) tree.sym; - List ptypes = ((MethodType) consSym.type).getParameterTypes(); - Type classType = consSym.owner.type; - - // Build lambda parameters - // partially cloned from TreeMaker.Params until 8014021 is fixed - Symbol owner = owner(); - ListBuffer paramBuff = new ListBuffer(); - int i = 0; - for (List l = ptypes; l.nonEmpty(); l = l.tail) { - JCVariableDecl param = make.Param(make.paramName(i++), l.head, owner); - param.sym.pos = tree.pos; - paramBuff.append(param); - } - List params = paramBuff.toList(); - - // Make new-class call - JCNewClass nc = makeNewClass(classType, make.Idents(params)); - nc.pos = tree.pos; - - // Make lambda holding the new-class call - JCLambda slam = make.Lambda(params, nc); - slam.targets = tree.targets; - slam.type = tree.type; - slam.pos = tree.pos; - - // Now it is a lambda, process as such - visitLambda(slam); + ReferenceTranslationContext rcontext = new ReferenceTranslationContext(tree); + contextMap.put(tree, rcontext); + if (rcontext.needsConversionToLambda()) { + // Convert to a lambda, and process as such + MemberReferenceToLambda conv = new MemberReferenceToLambda(tree, rcontext, owner()); + analyzeLambda(conv.lambda(), conv.getReceiverExpression()); } else { super.visitReference(tree); - contextMap.put(tree, makeReferenceContext(tree)); + if (dumpLambdaToMethodStats) { + log.note(tree, "mref.stat", rcontext.needsAltMetafactory(), null); + } } } @@ -1648,14 +1645,6 @@ } } - private TranslationContext makeLambdaContext(JCLambda tree) { - return new LambdaTranslationContext(tree); - } - - private TranslationContext makeReferenceContext(JCMemberReference tree) { - return new ReferenceTranslationContext(tree); - } - private class Frame { final JCTree tree; List locals; @@ -1775,6 +1764,13 @@ */ final Set freeVarProcessedLocalClasses; + /** + * For method references converted to lambdas. The method + * reference receiver expression. Must be treated like a captured + * variable. + */ + JCExpression methodReferenceReceiver; + LambdaTranslationContext(JCLambda tree) { super(tree); Frame frame = frameStack.head; @@ -1794,9 +1790,6 @@ // This symbol will be filled-in in complete this.translatedSym = makePrivateSyntheticMethod(0, null, null, owner.enclClass()); - if (dumpLambdaToMethodStats) { - log.note(tree, "lambda.stat", needsAltMetafactory(), translatedSym); - } translatedSymbols = new EnumMap<>(LambdaSymbolKind.class); translatedSymbols.put(PARAM, new LinkedHashMap()); @@ -1994,7 +1987,11 @@ // If instance access isn't needed, make it static. // Interface instance methods must be default methods. // Lambda methods are private synthetic. + // Inherit ACC_STRICT from the enclosing method, or, for clinit, + // from the class. translatedSym.flags_field = SYNTHETIC | LAMBDA_METHOD | + owner.flags_field & STRICTFP | + owner.owner.flags_field & STRICTFP | PRIVATE | (thisReferenced? (inInterface? 
DEFAULT : 0) : STATIC); @@ -2009,6 +2006,13 @@ for (Symbol thisSym : getSymbolMap(CAPTURED_VAR).values()) { params.append(make.VarDef((VarSymbol) thisSym, null)); } + if (methodReferenceReceiver != null) { + params.append(make.VarDef( + make.Modifiers(PARAMETER|FINAL), + names.fromString("$rcvr$"), + make.Type(methodReferenceReceiver.type), + null)); + } for (Symbol thisSym : getSymbolMap(PARAM).values()) { params.append(make.VarDef((VarSymbol) thisSym, null)); } @@ -2036,40 +2040,27 @@ * and the used by the main translation routines in order to adjust method * references (i.e. in case a bridge is needed) */ - private class ReferenceTranslationContext extends TranslationContext { + private final class ReferenceTranslationContext extends TranslationContext { final boolean isSuper; - final Symbol bridgeSym; final Symbol sigPolySym; ReferenceTranslationContext(JCMemberReference tree) { super(tree); this.isSuper = tree.hasKind(ReferenceKind.SUPER); - this.bridgeSym = needsBridge() - ? makePrivateSyntheticMethod(isSuper ? 0 : STATIC, - referenceBridgeName(), null, - owner.enclClass()) - : null; this.sigPolySym = isSignaturePolymorphic() ? makePrivateSyntheticMethod(tree.sym.flags(), tree.sym.name, bridgedRefSig(), tree.sym.enclClass()) : null; - if (dumpLambdaToMethodStats) { - String key = bridgeSym == null ? - "mref.stat" : "mref.stat.1"; - log.note(tree, key, needsAltMetafactory(), bridgeSym); - } } /** * Get the opcode associated with this method reference */ int referenceKind() { - return LambdaToMethod.this.referenceKind(needsBridge() - ? bridgeSym - : tree.sym); + return LambdaToMethod.this.referenceKind(tree.sym); } boolean needsVarArgsConversion() { @@ -2077,62 +2068,6 @@ } /** - * Generate a disambiguating string to increase stability (important - * if serialized) - * - * @return String to differentiate synthetic lambda method names - */ - private String referenceBridgeDisambiguation() { - StringBuilder buf = new StringBuilder(); - // Append the enclosing method signature to differentiate - // overloaded enclosing methods. - if (owner.type != null) { - buf.append(typeSig(owner.type)); - buf.append(":"); - } - - // Append qualifier type - buf.append(classSig(tree.sym.owner.type)); - - // Note static/instance - buf.append(tree.sym.isStatic()? " S " : " I "); - - // Append referenced signature - buf.append(typeSig(tree.sym.erasure(types))); - - return buf.toString(); - } - - /** - * Construct a unique stable name for the method reference bridge - * - * @return Name to use for the synthetic method name - */ - private Name referenceBridgeName() { - StringBuilder buf = new StringBuilder(); - // Append lambda ID, this is semantically significant - buf.append(names.lambda); - // Note that it is a method reference bridge - buf.append("MR$"); - // Append the enclosing method name - buf.append(enclosingMethodName()); - buf.append('$'); - // Append the referenced method name - buf.append(syntheticMethodNameComponent(tree.sym.name)); - buf.append('$'); - // Append a hash of the disambiguating string : enclosing method - // signature, etc. - String disam = referenceBridgeDisambiguation(); - buf.append(Integer.toHexString(disam.hashCode())); - buf.append('$'); - // The above appended name components may not be unique, append - // a count based on the above name components. 
- buf.append(syntheticMethodNameCounts.getIndex(buf)); - String result = buf.toString(); - return names.fromString(result); - } - - /** * @return Is this an array operation like clone() */ boolean isArrayOp() { @@ -2167,13 +2102,40 @@ } /** - * Does this reference needs a bridge (i.e. var args need to be - * expanded or "super" is used) + * Erasure destroys the implementation parameter subtype + * relationship for intersection types */ - final boolean needsBridge() { - return isSuper || needsVarArgsConversion() || isArrayOp() || + boolean interfaceParameterIsIntersectionType() { + List tl = tree.getDescriptorType(types).getParameterTypes(); + if (tree.kind == ReferenceKind.UNBOUND) { + tl = tl.tail; + } + for (; tl.nonEmpty(); tl = tl.tail) { + Type pt = tl.head; + if (pt.getKind() == TypeKind.TYPEVAR) { + TypeVar tv = (TypeVar) pt; + if (tv.bound.getKind() == TypeKind.INTERSECTION) { + return true; + } + } + } + return false; + } + + /** + * Does this reference need to be converted to a lambda + * (i.e. var args need to be expanded or "super" is used) + */ + final boolean needsConversionToLambda() { + return interfaceParameterIsIntersectionType() || + isSuper || + needsVarArgsConversion() || + isArrayOp() || isPrivateInOtherClass() || - !receiverAccessible(); + !receiverAccessible() || + (tree.getMode() == ReferenceMode.NEW && + tree.kind != ReferenceKind.ARRAY_CTOR && + (tree.sym.owner.isLocal() || tree.sym.owner.isInner())); } Type generatedRefSig() { --- ./langtools/src/share/classes/com/sun/tools/javac/comp/MemberEnter.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/MemberEnter.java Wed Feb 04 12:14:46 2015 -0800 @@ -575,51 +575,46 @@ Env localEnv = methodEnv(tree, env); - annotate.enterStart(); + DiagnosticPosition prevLintPos = deferredLintHandler.setPos(tree.pos()); try { - DiagnosticPosition prevLintPos = deferredLintHandler.setPos(tree.pos()); - try { - // Compute the method type - m.type = signature(m, tree.typarams, tree.params, - tree.restype, tree.recvparam, - tree.thrown, - localEnv); - } finally { - deferredLintHandler.setPos(prevLintPos); - } + // Compute the method type + m.type = signature(m, tree.typarams, tree.params, + tree.restype, tree.recvparam, + tree.thrown, + localEnv); + } finally { + deferredLintHandler.setPos(prevLintPos); + } - if (types.isSignaturePolymorphic(m)) { - m.flags_field |= SIGNATURE_POLYMORPHIC; - } + if (types.isSignaturePolymorphic(m)) { + m.flags_field |= SIGNATURE_POLYMORPHIC; + } - // Set m.params - ListBuffer params = new ListBuffer(); - JCVariableDecl lastParam = null; - for (List l = tree.params; l.nonEmpty(); l = l.tail) { - JCVariableDecl param = lastParam = l.head; - params.append(Assert.checkNonNull(param.sym)); - } - m.params = params.toList(); + // Set m.params + ListBuffer params = new ListBuffer(); + JCVariableDecl lastParam = null; + for (List l = tree.params; l.nonEmpty(); l = l.tail) { + JCVariableDecl param = lastParam = l.head; + params.append(Assert.checkNonNull(param.sym)); + } + m.params = params.toList(); - // mark the method varargs, if necessary - if (lastParam != null && (lastParam.mods.flags & Flags.VARARGS) != 0) - m.flags_field |= Flags.VARARGS; + // mark the method varargs, if necessary + if (lastParam != null && (lastParam.mods.flags & Flags.VARARGS) != 0) + m.flags_field |= Flags.VARARGS; - localEnv.info.scope.leave(); - if (chk.checkUnique(tree.pos(), m, enclScope)) { - enclScope.enter(m); - } + localEnv.info.scope.leave(); + if (chk.checkUnique(tree.pos(), m, 
enclScope)) { + enclScope.enter(m); + } - annotateLater(tree.mods.annotations, localEnv, m, tree.pos()); - // Visit the signature of the method. Note that - // TypeAnnotate doesn't descend into the body. - typeAnnotate(tree, localEnv, m, tree.pos()); + annotateLater(tree.mods.annotations, localEnv, m, tree.pos()); + // Visit the signature of the method. Note that + // TypeAnnotate doesn't descend into the body. + typeAnnotate(tree, localEnv, m, tree.pos()); - if (tree.defaultValue != null) - annotateDefaultValueLater(tree.defaultValue, localEnv, m); - } finally { - annotate.enterDone(); - } + if (tree.defaultValue != null) + annotateDefaultValueLater(tree.defaultValue, localEnv, m); } /** Create a fresh environment for method bodies. @@ -647,54 +642,49 @@ localEnv.info.staticLevel++; } DiagnosticPosition prevLintPos = deferredLintHandler.setPos(tree.pos()); - annotate.enterStart(); try { - try { - if (TreeInfo.isEnumInit(tree)) { - attr.attribIdentAsEnumType(localEnv, (JCIdent)tree.vartype); - } else { - attr.attribType(tree.vartype, localEnv); - if (TreeInfo.isReceiverParam(tree)) - checkReceiver(tree, localEnv); - } - } finally { - deferredLintHandler.setPos(prevLintPos); + if (TreeInfo.isEnumInit(tree)) { + attr.attribIdentAsEnumType(localEnv, (JCIdent)tree.vartype); + } else { + attr.attribType(tree.vartype, localEnv); + if (TreeInfo.isReceiverParam(tree)) + checkReceiver(tree, localEnv); } + } finally { + deferredLintHandler.setPos(prevLintPos); + } - if ((tree.mods.flags & VARARGS) != 0) { - //if we are entering a varargs parameter, we need to - //replace its type (a plain array type) with the more - //precise VarargsType --- we need to do it this way - //because varargs is represented in the tree as a - //modifier on the parameter declaration, and not as a - //distinct type of array node. - ArrayType atype = (ArrayType)tree.vartype.type.unannotatedType(); - tree.vartype.type = atype.makeVarargs(); + if ((tree.mods.flags & VARARGS) != 0) { + //if we are entering a varargs parameter, we need to + //replace its type (a plain array type) with the more + //precise VarargsType --- we need to do it this way + //because varargs is represented in the tree as a + //modifier on the parameter declaration, and not as a + //distinct type of array node. 
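As the comment above notes, varargs is recorded as a flag on the parameter declaration rather than as a distinct tree node, which is why memberEnter patches the entered array type into a varargs type after the fact. For reference, the two call shapes the single flagged int[] parameter has to serve (hypothetical class, illustrative names):

    class VarargsDemo {
        static int first(int... xs) {               // entered as int[] + VARARGS
            return xs.length == 0 ? -1 : xs[0];
        }

        public static void main(String[] args) {
            System.out.println(first());             // xs = {}
            System.out.println(first(7, 8));         // xs = {7, 8}
            System.out.println(first(new int[]{9})); // explicit array form
        }
    }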
+ ArrayType atype = (ArrayType)tree.vartype.type.unannotatedType(); + tree.vartype.type = atype.makeVarargs(); + } + Scope enclScope = enter.enterScope(env); + VarSymbol v = + new VarSymbol(0, tree.name, tree.vartype.type, enclScope.owner); + v.flags_field = chk.checkFlags(tree.pos(), tree.mods.flags, v, tree); + tree.sym = v; + if (tree.init != null) { + v.flags_field |= HASINIT; + if ((v.flags_field & FINAL) != 0 && + needsLazyConstValue(tree.init)) { + Env initEnv = getInitEnv(tree, env); + initEnv.info.enclVar = v; + v.setLazyConstValue(initEnv(tree, initEnv), attr, tree); } - Scope enclScope = enter.enterScope(env); - VarSymbol v = - new VarSymbol(0, tree.name, tree.vartype.type, enclScope.owner); - v.flags_field = chk.checkFlags(tree.pos(), tree.mods.flags, v, tree); - tree.sym = v; - if (tree.init != null) { - v.flags_field |= HASINIT; - if ((v.flags_field & FINAL) != 0 && - needsLazyConstValue(tree.init)) { - Env initEnv = getInitEnv(tree, env); - initEnv.info.enclVar = v; - v.setLazyConstValue(initEnv(tree, initEnv), attr, tree); - } - } - if (chk.checkUnique(tree.pos(), v, enclScope)) { - chk.checkTransparentVar(tree.pos(), v, enclScope); - enclScope.enter(v); - } - annotateLater(tree.mods.annotations, localEnv, v, tree.pos()); - typeAnnotate(tree.vartype, env, v, tree.pos()); - v.pos = tree.pos; - } finally { - annotate.enterDone(); } + if (chk.checkUnique(tree.pos(), v, enclScope)) { + chk.checkTransparentVar(tree.pos(), v, enclScope); + enclScope.enter(v); + } + annotateLater(tree.mods.annotations, localEnv, v, tree.pos()); + typeAnnotate(tree.vartype, env, v, tree.pos()); + v.pos = tree.pos; } // where void checkType(JCTree tree, Type type, String diag) { @@ -1030,189 +1020,194 @@ JCClassDecl tree = (JCClassDecl)env.tree; boolean wasFirst = isFirst; isFirst = false; + try { + annotate.enterStart(); - JavaFileObject prev = log.useSource(env.toplevel.sourcefile); - DiagnosticPosition prevLintPos = deferredLintHandler.setPos(tree.pos()); - try { - // Save class environment for later member enter (2) processing. - halfcompleted.append(env); + JavaFileObject prev = log.useSource(env.toplevel.sourcefile); + DiagnosticPosition prevLintPos = deferredLintHandler.setPos(tree.pos()); + try { + // Save class environment for later member enter (2) processing. + halfcompleted.append(env); - // Mark class as not yet attributed. - c.flags_field |= UNATTRIBUTED; + // Mark class as not yet attributed. + c.flags_field |= UNATTRIBUTED; - // If this is a toplevel-class, make sure any preceding import - // clauses have been seen. - if (c.owner.kind == PCK) { - memberEnter(env.toplevel, env.enclosing(TOPLEVEL)); - todo.append(env); + // If this is a toplevel-class, make sure any preceding import + // clauses have been seen. + if (c.owner.kind == PCK) { + memberEnter(env.toplevel, env.enclosing(TOPLEVEL)); + todo.append(env); + } + + if (c.owner.kind == TYP) + c.owner.complete(); + + // create an environment for evaluating the base clauses + Env baseEnv = baseEnv(tree, env); + + if (tree.extending != null) + typeAnnotate(tree.extending, baseEnv, sym, tree.pos()); + for (JCExpression impl : tree.implementing) + typeAnnotate(impl, baseEnv, sym, tree.pos()); + annotate.flush(); + + // Determine supertype. + Type supertype = + (tree.extending != null) + ? attr.attribBase(tree.extending, baseEnv, true, false, true) + : ((tree.mods.flags & Flags.ENUM) != 0) + ? attr.attribBase(enumBase(tree.pos, c), baseEnv, + true, false, false) + : (c.fullname == names.java_lang_Object) + ? 
Type.noType + : syms.objectType; + ct.supertype_field = modelMissingTypes(supertype, tree.extending, false); + + // Determine interfaces. + ListBuffer interfaces = new ListBuffer(); + ListBuffer all_interfaces = null; // lazy init + Set interfaceSet = new HashSet(); + List interfaceTrees = tree.implementing; + for (JCExpression iface : interfaceTrees) { + Type i = attr.attribBase(iface, baseEnv, false, true, true); + if (i.hasTag(CLASS)) { + interfaces.append(i); + if (all_interfaces != null) all_interfaces.append(i); + chk.checkNotRepeated(iface.pos(), types.erasure(i), interfaceSet); + } else { + if (all_interfaces == null) + all_interfaces = new ListBuffer().appendList(interfaces); + all_interfaces.append(modelMissingTypes(i, iface, true)); + } + } + if ((c.flags_field & ANNOTATION) != 0) { + ct.interfaces_field = List.of(syms.annotationType); + ct.all_interfaces_field = ct.interfaces_field; + } else { + ct.interfaces_field = interfaces.toList(); + ct.all_interfaces_field = (all_interfaces == null) + ? ct.interfaces_field : all_interfaces.toList(); + } + + if (c.fullname == names.java_lang_Object) { + if (tree.extending != null) { + chk.checkNonCyclic(tree.extending.pos(), + supertype); + ct.supertype_field = Type.noType; + } + else if (tree.implementing.nonEmpty()) { + chk.checkNonCyclic(tree.implementing.head.pos(), + ct.interfaces_field.head); + ct.interfaces_field = List.nil(); + } + } + + // Annotations. + // In general, we cannot fully process annotations yet, but we + // can attribute the annotation types and then check to see if the + // @Deprecated annotation is present. + attr.attribAnnotationTypes(tree.mods.annotations, baseEnv); + if (hasDeprecatedAnnotation(tree.mods.annotations)) + c.flags_field |= DEPRECATED; + annotateLater(tree.mods.annotations, baseEnv, c, tree.pos()); + // class type parameters use baseEnv but everything uses env + + chk.checkNonCyclicDecl(tree); + + attr.attribTypeVariables(tree.typarams, baseEnv); + // Do this here, where we have the symbol. + for (JCTypeParameter tp : tree.typarams) + typeAnnotate(tp, baseEnv, sym, tree.pos()); + + // Add default constructor if needed. + if ((c.flags() & INTERFACE) == 0 && + !TreeInfo.hasConstructors(tree.defs)) { + List argtypes = List.nil(); + List typarams = List.nil(); + List thrown = List.nil(); + long ctorFlags = 0; + boolean based = false; + boolean addConstructor = true; + JCNewClass nc = null; + if (c.name.isEmpty()) { + nc = (JCNewClass)env.next.tree; + if (nc.constructor != null) { + addConstructor = nc.constructor.kind != ERR; + Type superConstrType = types.memberType(c.type, + nc.constructor); + argtypes = superConstrType.getParameterTypes(); + typarams = superConstrType.getTypeArguments(); + ctorFlags = nc.constructor.flags() & VARARGS; + if (nc.encl != null) { + argtypes = argtypes.prepend(nc.encl.type); + based = true; + } + thrown = superConstrType.getThrownTypes(); + } + } + if (addConstructor) { + MethodSymbol basedConstructor = nc != null ? + (MethodSymbol)nc.constructor : null; + JCTree constrDef = DefaultConstructor(make.at(tree.pos), c, + basedConstructor, + typarams, argtypes, thrown, + ctorFlags, based); + tree.defs = tree.defs.prepend(constrDef); + } + } + + // enter symbols for 'this' into current scope. + VarSymbol thisSym = + new VarSymbol(FINAL | HASINIT, names._this, c.type, c); + thisSym.pos = Position.FIRSTPOS; + env.info.scope.enter(thisSym); + // if this is a class, enter symbol for 'super' into current scope. 
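The block above is where 'this' and 'super' are entered into the class scope as ordinary FINAL|HASINIT VarSymbols, which is what later lets those names resolve like variables. A short illustration with hypothetical classes:

    class Base { int f = 1; }

    class Sub extends Base {
        // Both names below resolve through the synthetic 'this' and 'super'
        // symbols entered during member enter; g evaluates to 2.
        int g = super.f + this.f;
    }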
+ if ((c.flags_field & INTERFACE) == 0 && + ct.supertype_field.hasTag(CLASS)) { + VarSymbol superSym = + new VarSymbol(FINAL | HASINIT, names._super, + ct.supertype_field, c); + superSym.pos = Position.FIRSTPOS; + env.info.scope.enter(superSym); + } + + // check that no package exists with same fully qualified name, + // but admit classes in the unnamed package which have the same + // name as a top-level package. + if (checkClash && + c.owner.kind == PCK && c.owner != syms.unnamedPackage && + reader.packageExists(c.fullname)) { + log.error(tree.pos, "clash.with.pkg.of.same.name", Kinds.kindName(sym), c); + } + if (c.owner.kind == PCK && (c.flags_field & PUBLIC) == 0 && + !env.toplevel.sourcefile.isNameCompatible(c.name.toString(),JavaFileObject.Kind.SOURCE)) { + c.flags_field |= AUXILIARY; + } + } catch (CompletionFailure ex) { + chk.completionError(tree.pos(), ex); + } finally { + deferredLintHandler.setPos(prevLintPos); + log.useSource(prev); } - if (c.owner.kind == TYP) - c.owner.complete(); - - // create an environment for evaluating the base clauses - Env baseEnv = baseEnv(tree, env); - - if (tree.extending != null) - typeAnnotate(tree.extending, baseEnv, sym, tree.pos()); - for (JCExpression impl : tree.implementing) - typeAnnotate(impl, baseEnv, sym, tree.pos()); - annotate.flush(); - - // Determine supertype. - Type supertype = - (tree.extending != null) - ? attr.attribBase(tree.extending, baseEnv, true, false, true) - : ((tree.mods.flags & Flags.ENUM) != 0) - ? attr.attribBase(enumBase(tree.pos, c), baseEnv, - true, false, false) - : (c.fullname == names.java_lang_Object) - ? Type.noType - : syms.objectType; - ct.supertype_field = modelMissingTypes(supertype, tree.extending, false); - - // Determine interfaces. - ListBuffer interfaces = new ListBuffer(); - ListBuffer all_interfaces = null; // lazy init - Set interfaceSet = new HashSet(); - List interfaceTrees = tree.implementing; - for (JCExpression iface : interfaceTrees) { - Type i = attr.attribBase(iface, baseEnv, false, true, true); - if (i.hasTag(CLASS)) { - interfaces.append(i); - if (all_interfaces != null) all_interfaces.append(i); - chk.checkNotRepeated(iface.pos(), types.erasure(i), interfaceSet); - } else { - if (all_interfaces == null) - all_interfaces = new ListBuffer().appendList(interfaces); - all_interfaces.append(modelMissingTypes(i, iface, true)); + // Enter all member fields and methods of a set of half completed + // classes in a second phase. + if (wasFirst) { + try { + while (halfcompleted.nonEmpty()) { + Env toFinish = halfcompleted.next(); + finish(toFinish); + if (allowTypeAnnos) { + typeAnnotations.organizeTypeAnnotationsSignatures(toFinish, (JCClassDecl)toFinish.tree); + typeAnnotations.validateTypeAnnotationsSignatures(toFinish, (JCClassDecl)toFinish.tree); + } + } + } finally { + isFirst = true; } } - if ((c.flags_field & ANNOTATION) != 0) { - ct.interfaces_field = List.of(syms.annotationType); - ct.all_interfaces_field = ct.interfaces_field; - } else { - ct.interfaces_field = interfaces.toList(); - ct.all_interfaces_field = (all_interfaces == null) - ? ct.interfaces_field : all_interfaces.toList(); - } - - if (c.fullname == names.java_lang_Object) { - if (tree.extending != null) { - chk.checkNonCyclic(tree.extending.pos(), - supertype); - ct.supertype_field = Type.noType; - } - else if (tree.implementing.nonEmpty()) { - chk.checkNonCyclic(tree.implementing.head.pos(), - ct.interfaces_field.head); - ct.interfaces_field = List.nil(); - } - } - - // Annotations. 
- // In general, we cannot fully process annotations yet, but we - // can attribute the annotation types and then check to see if the - // @Deprecated annotation is present. - attr.attribAnnotationTypes(tree.mods.annotations, baseEnv); - if (hasDeprecatedAnnotation(tree.mods.annotations)) - c.flags_field |= DEPRECATED; - annotateLater(tree.mods.annotations, baseEnv, c, tree.pos()); - // class type parameters use baseEnv but everything uses env - - chk.checkNonCyclicDecl(tree); - - attr.attribTypeVariables(tree.typarams, baseEnv); - // Do this here, where we have the symbol. - for (JCTypeParameter tp : tree.typarams) - typeAnnotate(tp, baseEnv, sym, tree.pos()); - - // Add default constructor if needed. - if ((c.flags() & INTERFACE) == 0 && - !TreeInfo.hasConstructors(tree.defs)) { - List argtypes = List.nil(); - List typarams = List.nil(); - List thrown = List.nil(); - long ctorFlags = 0; - boolean based = false; - boolean addConstructor = true; - JCNewClass nc = null; - if (c.name.isEmpty()) { - nc = (JCNewClass)env.next.tree; - if (nc.constructor != null) { - addConstructor = nc.constructor.kind != ERR; - Type superConstrType = types.memberType(c.type, - nc.constructor); - argtypes = superConstrType.getParameterTypes(); - typarams = superConstrType.getTypeArguments(); - ctorFlags = nc.constructor.flags() & VARARGS; - if (nc.encl != null) { - argtypes = argtypes.prepend(nc.encl.type); - based = true; - } - thrown = superConstrType.getThrownTypes(); - } - } - if (addConstructor) { - MethodSymbol basedConstructor = nc != null ? - (MethodSymbol)nc.constructor : null; - JCTree constrDef = DefaultConstructor(make.at(tree.pos), c, - basedConstructor, - typarams, argtypes, thrown, - ctorFlags, based); - tree.defs = tree.defs.prepend(constrDef); - } - } - - // enter symbols for 'this' into current scope. - VarSymbol thisSym = - new VarSymbol(FINAL | HASINIT, names._this, c.type, c); - thisSym.pos = Position.FIRSTPOS; - env.info.scope.enter(thisSym); - // if this is a class, enter symbol for 'super' into current scope. - if ((c.flags_field & INTERFACE) == 0 && - ct.supertype_field.hasTag(CLASS)) { - VarSymbol superSym = - new VarSymbol(FINAL | HASINIT, names._super, - ct.supertype_field, c); - superSym.pos = Position.FIRSTPOS; - env.info.scope.enter(superSym); - } - - // check that no package exists with same fully qualified name, - // but admit classes in the unnamed package which have the same - // name as a top-level package. - if (checkClash && - c.owner.kind == PCK && c.owner != syms.unnamedPackage && - reader.packageExists(c.fullname)) { - log.error(tree.pos, "clash.with.pkg.of.same.name", Kinds.kindName(sym), c); - } - if (c.owner.kind == PCK && (c.flags_field & PUBLIC) == 0 && - !env.toplevel.sourcefile.isNameCompatible(c.name.toString(),JavaFileObject.Kind.SOURCE)) { - c.flags_field |= AUXILIARY; - } - } catch (CompletionFailure ex) { - chk.completionError(tree.pos(), ex); } finally { - deferredLintHandler.setPos(prevLintPos); - log.useSource(prev); - } - - // Enter all member fields and methods of a set of half completed - // classes in a second phase. 
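The halfcompleted queue drained in this second phase is what allows mutually recursive top-level classes: headers (supertypes, type parameters, default constructors) are completed for the whole set before any member is entered. A minimal pair that depends on that ordering, as a sketch:

    // Neither class could be member-entered in isolation: each one's
    // members mention the other before it is fully completed.
    class A { B b; static final int X = B.Y + 1; }
    class B { A a; static final int Y = 1; }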
- if (wasFirst) { - try { - while (halfcompleted.nonEmpty()) { - Env toFinish = halfcompleted.next(); - finish(toFinish); - if (allowTypeAnnos) { - typeAnnotations.organizeTypeAnnotationsSignatures(toFinish, (JCClassDecl)toFinish.tree); - typeAnnotations.validateTypeAnnotationsSignatures(toFinish, (JCClassDecl)toFinish.tree); - } - } - } finally { - isFirst = true; - } + annotate.enterDone(); } } --- ./langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java Wed Feb 04 12:14:46 2015 -0800 @@ -96,6 +96,7 @@ public final boolean varargsEnabled; public final boolean allowMethodHandles; public final boolean allowFunctionalInterfaceMostSpecific; + public final boolean checkVarargsAccessAfterResolution; private final boolean debugResolve; private final boolean compactMethodDiags; final EnumSet verboseResolutionMode; @@ -137,6 +138,8 @@ Target target = Target.instance(context); allowMethodHandles = target.hasMethodHandles(); allowFunctionalInterfaceMostSpecific = source.allowFunctionalInterfaceMostSpecific(); + checkVarargsAccessAfterResolution = + source.allowPostApplicabilityVarargsAccessCheck(); polymorphicSignatureScope = new Scope(syms.noSymbol); inapplicableMethodException = new InapplicableMethodException(diags); @@ -835,9 +838,15 @@ super.argumentsAcceptable(env, deferredAttrContext, argtypes, formals, warn); //should we expand formals? if (deferredAttrContext.phase.isVarargsRequired()) { - //check varargs element type accessibility - varargsAccessible(env, types.elemtype(formals.last()), - deferredAttrContext.inferenceContext); + Type typeToCheck = null; + if (!checkVarargsAccessAfterResolution) { + typeToCheck = types.elemtype(formals.last()); + } else if (deferredAttrContext.mode == AttrMode.CHECK) { + typeToCheck = types.erasure(types.elemtype(formals.last())); + } + if (typeToCheck != null) { + varargsAccessible(env, typeToCheck, deferredAttrContext.inferenceContext); + } } } @@ -948,9 +957,10 @@ } public boolean compatible(Type found, Type req, Warner warn) { + InferenceContext inferenceContext = deferredAttrContext.inferenceContext; return strict ? - types.isSubtypeUnchecked(found, deferredAttrContext.inferenceContext.asUndetVar(req), warn) : - types.isConvertible(found, deferredAttrContext.inferenceContext.asUndetVar(req), warn); + types.isSubtypeUnchecked(inferenceContext.asUndetVar(found), inferenceContext.asUndetVar(req), warn) : + types.isConvertible(inferenceContext.asUndetVar(found), inferenceContext.asUndetVar(req), warn); } public void report(DiagnosticPosition pos, JCDiagnostic details) { @@ -3035,7 +3045,7 @@ /** * Should lookup stop at given phase with given result */ - protected boolean shouldStop(Symbol sym, MethodResolutionPhase phase) { + final boolean shouldStop(Symbol sym, MethodResolutionPhase phase) { return phase.ordinal() > maxPhase.ordinal() || sym.kind < ERRONEOUS || sym.kind == AMBIGUOUS; } @@ -3210,7 +3220,7 @@ super(referenceTree, name, site, argtypes.tail, typeargtypes, maxPhase); if (site.isRaw() && !argtypes.head.hasTag(NONE)) { Type asSuperSite = types.asSuper(argtypes.head, site.tsym); - this.site = asSuperSite; + this.site = types.capture(asSuperSite); } } @@ -4218,15 +4228,39 @@ VARARITY(true, true) { @Override public Symbol mergeResults(Symbol bestSoFar, Symbol sym) { - switch (sym.kind) { - case WRONG_MTH: - return (bestSoFar.kind == WRONG_MTH || bestSoFar.kind == WRONG_MTHS) ? 
- bestSoFar : - sym; - case ABSENT_MTH: - return bestSoFar; - default: - return sym; + //Check invariants (see {@code LookupHelper.shouldStop}) + Assert.check(bestSoFar.kind >= ERRONEOUS && bestSoFar.kind != AMBIGUOUS); + if (sym.kind < ERRONEOUS) { + //varargs resolution successful + return sym; + } else { + //pick best error + switch (bestSoFar.kind) { + case WRONG_MTH: + case WRONG_MTHS: + //Override previous errors if they were caused by argument mismatch. + //This generally means preferring current symbols - but we need to pay + //attention to the fact that the varargs lookup returns 'less' candidates + //than the previous rounds, and adjust that accordingly. + switch (sym.kind) { + case WRONG_MTH: + //if the previous round matched more than one method, return that + //result instead + return bestSoFar.kind == WRONG_MTHS ? + bestSoFar : sym; + case ABSENT_MTH: + //do not override erroneous symbol if the arity lookup did not + //match any method + return bestSoFar; + case WRONG_MTHS: + default: + //safe to override + return sym; + } + default: + //otherwise, return first error + return bestSoFar; + } } } }; --- ./langtools/src/share/classes/com/sun/tools/javac/jvm/Code.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/jvm/Code.java Wed Feb 04 12:14:46 2015 -0800 @@ -1953,12 +1953,12 @@ } } - public void closeRange(char end) { - if (isLastRangeInitialized()) { + public void closeRange(char length) { + if (isLastRangeInitialized() && length > 0) { Range range = lastRange(); if (range != null) { if (range.length == Character.MAX_VALUE) { - range.length = end; + range.length = length; } } } else { @@ -2017,13 +2017,12 @@ List locals = lvtRanges.getVars(meth, tree); for (LocalVar localVar: lvar) { for (VarSymbol aliveLocal : locals) { - if (localVar == null) { - return; - } - if (localVar.sym == aliveLocal && localVar.lastRange() != null) { - char length = (char)(closingCP - localVar.lastRange().start_pc); - if (length > 0 && length < Character.MAX_VALUE) { - localVar.closeRange(length); + if (localVar != null) { + if (localVar.sym == aliveLocal && localVar.lastRange() != null) { + char length = (char)(closingCP - localVar.lastRange().start_pc); + if (length < Character.MAX_VALUE) { + localVar.closeRange(length); + } } } } @@ -2032,12 +2031,11 @@ void adjustAliveRanges(int oldCP, int delta) { for (LocalVar localVar: lvar) { - if (localVar == null) { - return; - } - for (LocalVar.Range range: localVar.aliveRanges) { - if (range.closed() && range.start_pc + range.length >= oldCP) { - range.length += delta; + if (localVar != null) { + for (LocalVar.Range range: localVar.aliveRanges) { + if (range.closed() && range.start_pc + range.length >= oldCP) { + range.length += delta; + } } } } @@ -2093,7 +2091,7 @@ lvar[adr].isLastRangeInitialized()) { LocalVar v = lvar[adr]; char length = (char)(curCP() - v.lastRange().start_pc); - if (length > 0 && length < Character.MAX_VALUE) { + if (length < Character.MAX_VALUE) { lvar[adr] = v.dup(); v.closeRange(length); putVar(v); --- ./langtools/src/share/classes/com/sun/tools/javac/jvm/Gen.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/jvm/Gen.java Wed Feb 04 12:14:46 2015 -0800 @@ -74,6 +74,7 @@ private Name accessDollar; private final Types types; private final Lower lower; + private final Flow flow; /** Switch: GJ mode? */ @@ -125,6 +126,7 @@ stringBufferAppend = new HashMap(); accessDollar = names. 
fromString("access" + target.syntheticNameChar()); + flow = Flow.instance(context); lower = Lower.instance(context); Options options = Options.instance(context); @@ -2516,9 +2518,7 @@ */ if (varDebugInfo && (cdef.sym.flags() & SYNTHETIC) == 0) { try { - LVTAssignAnalyzer lvtAssignAnalyzer = LVTAssignAnalyzer.make( - lvtRanges, syms, names); - lvtAssignAnalyzer.analyzeTree(localEnv); + new LVTAssignAnalyzer().analyzeTree(localEnv); } catch (Throwable e) { throw e; } @@ -2609,11 +2609,10 @@ } } - static class LVTAssignAnalyzer + class LVTAssignAnalyzer extends Flow.AbstractAssignAnalyzer { final LVTBits lvtInits; - final LVTRanges lvtRanges; /* This class is anchored to a context dependent tree. The tree can * vary inside the same instruction for example in the switch instruction @@ -2621,35 +2620,12 @@ * to a given case. The aim is to always anchor the bits to the tree * capable of closing a DA range. */ - static class LVTBits extends Bits { - - enum BitsOpKind { - INIT, - CLEAR, - INCL_BIT, - EXCL_BIT, - ASSIGN, - AND_SET, - OR_SET, - DIFF_SET, - XOR_SET, - INCL_RANGE, - EXCL_RANGE, - } + class LVTBits extends Bits { JCTree currentTree; - LVTAssignAnalyzer analyzer; private int[] oldBits = null; BitsState stateBeforeOp; - LVTBits() { - super(false); - } - - LVTBits(int[] bits, BitsState initState) { - super(bits, initState); - } - @Override public void clear() { generalOp(null, -1, BitsOpKind.CLEAR); @@ -2757,12 +2733,11 @@ if (currentTree != null && stateBeforeOp != BitsState.UNKNOWN && trackTree(currentTree)) { - List locals = - analyzer.lvtRanges - .getVars(analyzer.currentMethod, currentTree); + List locals = lvtRanges + .getVars(currentMethod, currentTree); locals = locals != null ? locals : List.nil(); - for (JCVariableDecl vardecl : analyzer.vardecls) { + for (JCVariableDecl vardecl : vardecls) { //once the first is null, the rest will be so. 
if (vardecl == null) { break; @@ -2772,7 +2747,7 @@ } } if (!locals.isEmpty()) { - analyzer.lvtRanges.setEntry(analyzer.currentMethod, + lvtRanges.setEntry(currentMethod, currentTree, locals); } } @@ -2789,8 +2764,8 @@ boolean trackVar(VarSymbol var) { return (var.owner.kind == MTH && - (var.flags() & (PARAMETER | HASINIT)) == 0 && - analyzer.trackable(var)); + (var.flags() & PARAMETER) == 0 && + trackable(var)); } boolean trackTree(JCTree tree) { @@ -2806,7 +2781,8 @@ } - public class LVTAssignPendingExit extends Flow.AssignAnalyzer.AssignPendingExit { + public class LVTAssignPendingExit extends + Flow.AbstractAssignAnalyzer.AbstractAssignPendingExit { LVTAssignPendingExit(JCTree tree, final Bits inits, final Bits uninits) { super(tree, inits, uninits); @@ -2819,16 +2795,10 @@ } } - private LVTAssignAnalyzer(LVTRanges lvtRanges, Symtab syms, Names names) { - super(new LVTBits(), syms, names, false); - lvtInits = (LVTBits)inits; - this.lvtRanges = lvtRanges; - } - - public static LVTAssignAnalyzer make(LVTRanges lvtRanges, Symtab syms, Names names) { - LVTAssignAnalyzer result = new LVTAssignAnalyzer(lvtRanges, syms, names); - result.lvtInits.analyzer = result; - return result; + private LVTAssignAnalyzer() { + flow.super(); + lvtInits = new LVTBits(); + inits = lvtInits; } @Override --- ./langtools/src/share/classes/com/sun/tools/javac/parser/DocCommentParser.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/parser/DocCommentParser.java Wed Feb 04 12:14:46 2015 -0800 @@ -1039,7 +1039,7 @@ } /** - * @see Javadoc Tags + * @see Javadoc Tags */ private void initTagParsers() { TagParser[] parsers = { --- ./langtools/src/share/classes/com/sun/tools/javac/util/Bits.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/util/Bits.java Wed Feb 04 12:14:46 2015 -0800 @@ -84,6 +84,20 @@ } + public enum BitsOpKind { + INIT, + CLEAR, + INCL_BIT, + EXCL_BIT, + ASSIGN, + AND_SET, + OR_SET, + DIFF_SET, + XOR_SET, + INCL_RANGE, + EXCL_RANGE, + } + private final static int wordlen = 32; private final static int wordshift = 5; private final static int wordmask = wordlen - 1; --- ./langtools/src/share/classes/com/sun/tools/javac/util/Convert.java Mon Dec 08 12:30:44 2014 -0800 +++ ./langtools/src/share/classes/com/sun/tools/javac/util/Convert.java Wed Feb 04 12:14:46 2015 -0800 @@ -36,9 +36,9 @@ *
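In the Gen.java hunk above, LVTAssignAnalyzer stops being a static class built through a factory method and becomes an inner class whose superclass is itself an inner class of Flow, so its constructor must supply the enclosing Flow instance through the qualified superclass invocation flow.super(). That idiom is rare enough to merit a self-contained sketch (all names hypothetical):

    // QualifiedSuper.java -- the flow.super() idiom in miniature
    class Flow {
        class Analyzer {                     // non-static inner class of Flow
            void analyze() { System.out.println("base analysis"); }
        }
    }

    public class QualifiedSuper {
        private final Flow flow = new Flow();

        // Extending Flow.Analyzer requires an enclosing Flow instance;
        // 'flow.super()' hands it to the superclass constructor, exactly
        // as in the LVTAssignAnalyzer constructor above.
        class LvtAnalyzer extends Flow.Analyzer {
            LvtAnalyzer() {
                flow.super();
            }
        }

        public static void main(String[] args) {
            QualifiedSuper outer = new QualifiedSuper();
            outer.new LvtAnalyzer().analyze();   // prints "base analysis"
        }
    }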
--- ./langtools/src/share/classes/com/sun/tools/javac/util/Convert.java Mon Dec 08 12:30:44 2014 -0800
+++ ./langtools/src/share/classes/com/sun/tools/javac/util/Convert.java Wed Feb 04 12:14:46 2015 -0800
@@ -36,9 +36,9 @@
  *
  * See also:
  *