--- poi/src/java/org/apache/poi/poifs/nio/FileBackedDataSource.java (revision 1707437)
+++ poi/src/java/org/apache/poi/poifs/nio/FileBackedDataSource.java (working copy)
@@ -26,9 +26,13 @@
 import java.nio.channels.Channels;
 import java.nio.channels.FileChannel;
 import java.nio.channels.WritableByteChannel;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.poi.util.IOUtils;
 
+import sun.nio.ch.DirectBuffer;
+
 /**
  * A POIFS {@link DataSource} backed by a File
  */
@@ -37,6 +41,13 @@
    private boolean writable;
    // remember file base, which needs to be closed too
    private RandomAccessFile srcFile;
+
+   // Buffers which map to a file-portion are not closed automatically when the Channel is closed,
+   // therefore we keep the list of mapped buffers and clean them up during close().
+   // See https://bz.apache.org/bugzilla/show_bug.cgi?id=58480,
+   // http://stackoverflow.com/questions/3602783/file-access-synchronized-on-java-object and
+   // http://bugs.java.com/view_bug.do?bug_id=4724038 for related discussions
+   private List buffersToClean = new ArrayList();
 
    public FileBackedDataSource(File file) throws FileNotFoundException {
       this(newSrcFile(file, "r"), true);
@@ -91,6 +102,11 @@
       // Ready it for reading
       dst.position(0);
 
+      // remember the buffer for cleanup if necessary
+      if(dst instanceof DirectBuffer) {
+         buffersToClean.add((DirectBuffer)dst);
+      }
+
       // All done
       return dst;
    }
@@ -115,7 +131,12 @@
 
    @Override
    public void close() throws IOException {
-      if (srcFile != null) {
+      for(DirectBuffer buffer : buffersToClean) {
+         buffer.cleaner().clean();
+      }
+      buffersToClean.clear();
+
+      if (srcFile != null) {
          // see http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4796385
          srcFile.close();
       } else {
--- poi/src/java/org/apache/poi/hssf/usermodel/HSSFWorkbook.java (revision 1707437)
+++ poi/src/java/org/apache/poi/hssf/usermodel/HSSFWorkbook.java (working copy)
@@ -1372,36 +1372,38 @@
 
     public void write(OutputStream stream) throws IOException {
-        byte[] bytes = getBytes();
         NPOIFSFileSystem fs = new NPOIFSFileSystem();
-
-        // For tracking what we've written out, used if we're
-        // going to be preserving nodes
-        List excepts = new ArrayList(1);
-
-        // Write out the Workbook stream
-        fs.createDocument(new ByteArrayInputStream(bytes), "Workbook");
-
-        // Write out our HPFS properties, if we have them
-        writeProperties(fs, excepts);
-
-        if (preserveNodes) {
-            // Don't write out the old Workbook, we'll be doing our new one
-            // If the file had an "incorrect" name for the workbook stream,
-            // don't write the old one as we'll use the correct name shortly
-            excepts.addAll(Arrays.asList(WORKBOOK_DIR_ENTRY_NAMES));
-
-            // Copy over all the other nodes to our new poifs
-            EntryUtils.copyNodes(
-                    new FilteringDirectoryNode(this.directory, excepts)
-                    , new FilteringDirectoryNode(fs.getRoot(), excepts)
-                    );
-
-            // YK: preserve StorageClsid, it is important for embedded workbooks,
-            // see Bugzilla 47920
-            fs.getRoot().setStorageClsid(this.directory.getStorageClsid());
+        try {
+            // For tracking what we've written out, used if we're
+            // going to be preserving nodes
+            List excepts = new ArrayList(1);
+
+            // Write out the Workbook stream
+            fs.createDocument(new ByteArrayInputStream(getBytes()), "Workbook");
+
+            // Write out our HPFS properties, if we have them
+            writeProperties(fs, excepts);
+
+            if (preserveNodes) {
+                // Don't write out the old Workbook, we'll be doing our new one
+                // If the file had an "incorrect" name for the workbook stream,
+                // don't write the old one as we'll use the correct name shortly
+                excepts.addAll(Arrays.asList(WORKBOOK_DIR_ENTRY_NAMES));
+
+                // Copy over all the other nodes to our new poifs
+                EntryUtils.copyNodes(
+                        new FilteringDirectoryNode(this.directory, excepts)
+                        , new FilteringDirectoryNode(fs.getRoot(), excepts)
+                        );
+
+                // YK: preserve StorageClsid, it is important for embedded workbooks,
+                // see Bugzilla 47920
+                fs.getRoot().setStorageClsid(this.directory.getStorageClsid());
+            }
+
+            fs.writeFilesystem(stream);
+        } finally {
+            fs.close();
         }
-
-        fs.writeFilesystem(stream);
     }
 
     /**