/*
Copyright (c) 2005 Health Market Science, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
USA

You can contact Health Market Science at info@healthmarketscience.com
or at the following address:

Health Market Science
2700 Horizon Drive
Suite 200
King of Prussia, PA 19406
*/

package com.healthmarketscience.jackcess;

import java.io.Flushable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.Channel;
import java.nio.channels.FileChannel;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Reads and writes individual pages in a database file
 * @author Tim McCune
 */
44  public class PageChannel implements Channel, Flushable {
45    
46    private static final Log LOG = LogFactory.getLog(PageChannel.class);
47    
48    static final int INVALID_PAGE_NUMBER = -1;
49  
50    static final ByteOrder DEFAULT_BYTE_ORDER = ByteOrder.LITTLE_ENDIAN;
51    
52    /** invalid page header, used when deallocating old pages.  data pages
53        generally have 4 interesting bytes at the beginning which we want to
54        reset. */
55    private static final byte[] INVALID_PAGE_BYTE_HEADER =
56      new byte[]{PageTypes.INVALID, (byte)0, (byte)0, (byte)0};
57    
58    /** Global usage map always lives on page 1 */
59    static final int PAGE_GLOBAL_USAGE_MAP = 1;
60    /** Global usage map always lives at row 0 */
61    static final int ROW_GLOBAL_USAGE_MAP = 0;
62    
63    /** Channel containing the database */
64    private final FileChannel _channel;
65    /** whether or not the _channel should be closed by this class */
66    private final boolean _closeChannel;
67    /** Format of the database in the channel */
68    private final JetFormat _format;
69    /** whether or not to force all writes to disk immediately */
70    private final  boolean _autoSync;
71    /** buffer used when deallocating old pages.  data pages generally have 4
72        interesting bytes at the beginning which we want to reset. */
73    private final ByteBuffer _invalidPageBytes =
74      ByteBuffer.wrap(INVALID_PAGE_BYTE_HEADER);
75    /** dummy buffer used when allocating new pages */
76    private final ByteBuffer _forceBytes = ByteBuffer.allocate(1);
77    /** Tracks free pages in the database. */
78    private UsageMap _globalUsageMap;
79    /** handler for the current database encoding type */
80    private CodecHandler _codecHandler = DefaultCodecProvider.DUMMY_HANDLER;
81    /** temp page buffer used when pages cannot be partially encoded */
82    private final TempPageHolder _fullPageEncodeBufferH =
83      TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
84    
85    /**
86     * @param channel Channel containing the database
87     * @param format Format of the database in the channel
88     */
89    public PageChannel(FileChannel channel, boolean closeChannel,
90                       JetFormat format, boolean autoSync)
91      throws IOException
92    {
93      _channel = channel;
94      _closeChannel = closeChannel;
95      _format = format;
96      _autoSync = autoSync;
97    }
98  
99    /**
100    * Does second-stage initialization, must be called after construction.
101    */
102   public void initialize(Database database, CodecProvider codecProvider)
103     throws IOException
104   {
105     // initialize page en/decoding support
106     _codecHandler = codecProvider.createHandler(this, database.getCharset());
107 
108     // note the global usage map is a special map where any page outside of
109     // the current range is assumed to be "on"
110     _globalUsageMap = UsageMap.read(database, PAGE_GLOBAL_USAGE_MAP,
111                                     ROW_GLOBAL_USAGE_MAP, true);
112   }
113   
114   /**
115    * Only used by unit tests
116    */
117   PageChannel(boolean testing) {
118     if(!testing) {
119       throw new IllegalArgumentException();
120     }
121     _channel = null;
122     _closeChannel = false;
123     _format = JetFormat.VERSION_4;
124     _autoSync = false;
125   }
126 
127   public JetFormat getFormat() {
128     return _format;
129   }
130 
131   public boolean isAutoSync() {
132     return _autoSync;
133   }
134 
135   /**
136    * Returns the next page number based on the given file size.
137    */
138   private int getNextPageNumber(long size) {
139     return (int)(size / getFormat().PAGE_SIZE);
140   }
141 
142   /**
143    * Returns the offset for a page within the file.
144    */
145   private long getPageOffset(int pageNumber) {
146     return((long) pageNumber * (long) getFormat().PAGE_SIZE);
147   }
148   
149   /**
150    * Validates that the given pageNumber is valid for this database.
151    */
152   private void validatePageNumber(int pageNumber)
153     throws IOException
154   {
155     int nextPageNumber = getNextPageNumber(_channel.size());
156     if((pageNumber <= INVALID_PAGE_NUMBER) || (pageNumber >= nextPageNumber)) {
157       throw new IllegalStateException("invalid page number " + pageNumber);
158     }
159   }
160   
161   /**
162    * @param buffer Buffer to read the page into
163    * @param pageNumber Number of the page to read in (starting at 0)
164    */
165   public void readPage(ByteBuffer buffer, int pageNumber)
166     throws IOException
167   {
168     validatePageNumber(pageNumber);
169     if (LOG.isDebugEnabled()) {
170       LOG.debug("Reading in page " + Integer.toHexString(pageNumber));
171     }
172     buffer.clear();
173     int bytesRead = _channel.read(
174         buffer, (long) pageNumber * (long) getFormat().PAGE_SIZE);
175     buffer.flip();
176     if(bytesRead != getFormat().PAGE_SIZE) {
177       throw new IOException("Failed attempting to read " +
178                             getFormat().PAGE_SIZE + " bytes from page " +
179                             pageNumber + ", only read " + bytesRead);
180     }
181 
182     if(pageNumber == 0) {
183       // de-mask header (note, page 0 never has additional encoding)
184       applyHeaderMask(buffer);
185     } else {
186       _codecHandler.decodePage(buffer, pageNumber);
187     }
188   }
189   
190   /**
191    * Write a page to disk
192    * @param page Page to write
193    * @param pageNumber Page number to write the page to
194    */
195   public void writePage(ByteBuffer page, int pageNumber) throws IOException {
196     writePage(page, pageNumber, 0);
197   }
198   
199   /**
200    * Write a page (or part of a page) to disk
201    * @param page Page to write
202    * @param pageNumber Page number to write the page to
203    * @param pageOffset offset within the page at which to start writing the
204    *                   page data
205    */
206   public void writePage(ByteBuffer page, int pageNumber, int pageOffset)
207     throws IOException
208   {
209     validatePageNumber(pageNumber);
210     
211     page.rewind().position(pageOffset);
212 
213     int writeLen = page.remaining();
214     if((writeLen + pageOffset) > getFormat().PAGE_SIZE) {
215       throw new IllegalArgumentException(
216           "Page buffer is too large, size " + (writeLen + pageOffset));
217     }
218     
219     ByteBuffer encodedPage = page;
220     if(pageNumber == 0) {
221       // re-mask header
222       applyHeaderMask(page);
223     } else {
224 
225       if(!_codecHandler.canEncodePartialPage()) {
226         if((pageOffset > 0) && (writeLen < getFormat().PAGE_SIZE)) {
227 
228           // current codec handler cannot encode part of a page, so need to
229           // copy the modified part into the current page contents in a temp
230           // buffer so that we can encode the entire page
231           ByteBuffer fullPage = _fullPageEncodeBufferH.setPage(
232               this, pageNumber);
233 
234           // copy the modified part to the full page
235           fullPage.position(pageOffset);
236           fullPage.put(page);
237           fullPage.rewind();
238 
239           // reset so we can write the whole page
240           page = fullPage;
241           pageOffset = 0;
242           writeLen = getFormat().PAGE_SIZE;
243 
244         } else {
245 
246           _fullPageEncodeBufferH.possiblyInvalidate(pageNumber, null);
247         }
248       }
249 
250       // re-encode page
251       encodedPage = _codecHandler.encodePage(page, pageNumber, pageOffset);
252 
253       // reset position/limit in case they were affected by encoding
254       encodedPage.position(pageOffset).limit(pageOffset + writeLen);
255     }
256 
257     try {
258       _channel.write(encodedPage, (getPageOffset(pageNumber) + pageOffset));
259       if(_autoSync) {
260         flush();
261       }
262     } finally {
263       if(pageNumber == 0) {
264         // de-mask header
265         applyHeaderMask(page);
266       }
267     }
268   }
269   
270   /**
271    * Allocates a new page in the database.  Data in the page is undefined
272    * until it is written in a call to {@link #writePage(ByteBuffer,int)}.
273    */
274   public int allocateNewPage() throws IOException {
275     // this will force the file to be extended with mostly undefined bytes
276     long size = _channel.size();
277     if(size >= getFormat().MAX_DATABASE_SIZE) {
278       throw new IOException("Database is at maximum size " +
279                             getFormat().MAX_DATABASE_SIZE);
280     }
281     if((size % getFormat().PAGE_SIZE) != 0L) {
282       throw new IOException("Database corrupted, file size " + size +
283                             " is not multiple of page size " +
284                             getFormat().PAGE_SIZE);
285     }
286     
287     _forceBytes.rewind();
288     
289     // push the buffer to the end of the page, so that a full page's worth of
290     // data is written
291     int pageOffset = (getFormat().PAGE_SIZE - _forceBytes.remaining());
292     long offset = size + pageOffset;
293     int pageNumber = getNextPageNumber(size);
294 
295     // since we are just allocating page space at this point and not writing
296     // meaningful data, we do _not_ encode the page.
297     _channel.write(_forceBytes, offset);
298 
299     // note, we "force" page removal because we know that this is an unused
300     // page (since we just added it to the file)
301     _globalUsageMap.removePageNumber(pageNumber, true);
302     return pageNumber;
303   }
304 
305   /**
306    * Deallocate a previously used page in the database.
307    */
308   public void deallocatePage(int pageNumber) throws IOException {
309     validatePageNumber(pageNumber);
310     
311     // don't write the whole page, just wipe out the header (which should be
312     // enough to let us know if we accidentally try to use an invalid page)
313     _invalidPageBytes.rewind();
314     _channel.write(_invalidPageBytes, getPageOffset(pageNumber));
315     
316     _globalUsageMap.addPageNumber(pageNumber);  //force is done here
317   }
318   
319   /**
320    * @return A newly-allocated buffer that can be passed to readPage
321    */
322   public ByteBuffer createPageBuffer() {
323     return createBuffer(getFormat().PAGE_SIZE);
324   }
325 
326   /**
327    * @return A newly-allocated buffer of the given size and DEFAULT_BYTE_ORDER
328    *         byte order
329    */
330   public ByteBuffer createBuffer(int size) {
331     return createBuffer(size, DEFAULT_BYTE_ORDER);
332   }
333   
334   /**
335    * @return A newly-allocated buffer of the given size and byte order
336    */
337   public ByteBuffer createBuffer(int size, ByteOrder order) {
338     return ByteBuffer.allocate(size).order(order);
339   }
340   
341   public void flush() throws IOException {
342     _channel.force(true);
343   }
344   
345   public void close() throws IOException {
346     flush();
347     if(_closeChannel) {
348       _channel.close();
349     }
350   }
351   
352   public boolean isOpen() {
353     return _channel.isOpen();
354   }
355 
356   /**
357    * Applies the XOR mask to the database header in the given buffer.
358    */
359   private void applyHeaderMask(ByteBuffer buffer) {
360       // de/re-obfuscate the header
361       byte[] headerMask = _format.HEADER_MASK;
362       for(int idx = 0; idx < headerMask.length; ++idx) {
363         int pos = idx + _format.OFFSET_MASKED_HEADER;
364         byte b = (byte)(buffer.get(pos) ^ headerMask[idx]);
365         buffer.put(pos, b);
366       }
367   }
368 
369   /**
370    * @return a duplicate of the current buffer narrowed to the given position
371    *         and limit.  mark will be set at the current position.
372    */
373   public static ByteBuffer narrowBuffer(ByteBuffer buffer, int position,
374                                         int limit)
375   {
376     return (ByteBuffer)buffer.duplicate()
377       .order(buffer.order())
378       .clear()
379       .limit(limit)
380       .position(position)
381       .mark();
382   }
383 
384   /**
385    * Returns a ByteBuffer wrapping the given bytes and configured with the
386    * default byte order.
387    */
388   public static ByteBuffer wrap(byte[] bytes) {
389     return ByteBuffer.wrap(bytes).order(DEFAULT_BYTE_ORDER);
390   }
391 }