diff --git a/jhdf/src/main/java/io/jhdf/AbstractNode.java b/jhdf/src/main/java/io/jhdf/AbstractNode.java
index ec65785a..f3a87629 100644
--- a/jhdf/src/main/java/io/jhdf/AbstractNode.java
+++ b/jhdf/src/main/java/io/jhdf/AbstractNode.java
@@ -56,14 +56,14 @@ protected Map<String, Attribute> initialize() throws ConcurrentException {
 				if (attributeInfoMessage.getFractalHeapAddress() != Constants.UNDEFINED_ADDRESS) {
 					// Create the heap and btree
-					FractalHeap fractalHeap = new FractalHeap(hdfFc, attributeInfoMessage.getFractalHeapAddress());
-					BTreeV2<AttributeNameForIndexedAttributesRecord> btree = new BTreeV2<>(hdfFc,
+					FractalHeap fractalHeap = new FractalHeap(hdfBackingStorage, attributeInfoMessage.getFractalHeapAddress());
+					BTreeV2<AttributeNameForIndexedAttributesRecord> btree = new BTreeV2<>(hdfBackingStorage,
 						attributeInfoMessage.getAttributeNameBTreeAddress());

 					// Read the attribute messages from the btree+heap
 					for (AttributeNameForIndexedAttributesRecord attributeRecord : btree.getRecords()) {
 						ByteBuffer bb = fractalHeap.getId(attributeRecord.getHeapId());
-						AttributeMessage attributeMessage = new AttributeMessage(bb, hdfFc,
+						AttributeMessage attributeMessage = new AttributeMessage(bb, hdfBackingStorage,
 							attributeRecord.getFlags());
 						logger.trace("Read attribute message '{}'", attributeMessage);
 						attributeMessages.add(attributeMessage);
@@ -77,25 +77,25 @@ protected Map<String, Attribute> initialize() throws ConcurrentException {
 			return attributeMessages.stream()
 				.collect(
 					toMap(AttributeMessage::getName,
-						message -> new AttributeImpl(hdfFc, AbstractNode.this, message)));
+						message -> new AttributeImpl(hdfBackingStorage, AbstractNode.this, message)));
 		}
 	}

-	private final HdfBackingStorage hdfFc;
+	private final HdfBackingStorage hdfBackingStorage;
 	protected final long address;
 	protected final String name;
 	protected final Group parent;
 	protected final LazyInitializer<ObjectHeader> header;
 	protected final AttributesLazyInitializer attributes;

-	protected AbstractNode(HdfBackingStorage hdfFc, long address, String name, Group parent) {
-		this.hdfFc = hdfFc;
+	protected AbstractNode(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent) {
+		this.hdfBackingStorage = hdfBackingStorage;
 		this.address = address;
 		this.name = name;
 		this.parent = parent;

 		try {
-			header = ObjectHeader.lazyReadObjectHeader(hdfFc, address);
+			header = ObjectHeader.lazyReadObjectHeader(hdfBackingStorage, address);

 			// Attributes
 			attributes = new AttributesLazyInitializer(header);
diff --git a/jhdf/src/main/java/io/jhdf/AttributeImpl.java b/jhdf/src/main/java/io/jhdf/AttributeImpl.java
index e8b8836b..709ec97c 100644
--- a/jhdf/src/main/java/io/jhdf/AttributeImpl.java
+++ b/jhdf/src/main/java/io/jhdf/AttributeImpl.java
@@ -25,13 +25,13 @@ public class AttributeImpl implements Attribute {
 	private static final Logger logger = LoggerFactory.getLogger(AttributeImpl.class);

-	private final HdfBackingStorage hdfFc;
+	private final HdfBackingStorage hdfBackingStorage;
 	private final Node node;
 	private final String name;
 	private final AttributeMessage message;

-	public AttributeImpl(HdfBackingStorage hdfFc, Node node, AttributeMessage message) {
-		this.hdfFc = hdfFc;
+	public AttributeImpl(HdfBackingStorage hdfBackingStorage, Node node, AttributeMessage message) {
+		this.hdfBackingStorage = hdfBackingStorage;
 		this.node = node;
 		this.name = message.getName();
 		this.message = message;
@@ -70,7 +70,7 @@ public Object getData() {
 		}
 		DataType type = message.getDataType();
 		ByteBuffer bb = message.getDataBuffer();
-		return DatasetReader.readDataset(type, bb, getDimensions(), hdfFc);
+		return DatasetReader.readDataset(type, bb, getDimensions(), hdfBackingStorage);
 	}

 	@Override
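Every call site touched by this rename resolves through the same small surface. A sketch of `HdfBackingStorage` as it can be inferred purely from the call sites in this diff — the real interface (in `io.jhdf.storage`) may declare further members:

```java
import java.nio.ByteBuffer;

import io.jhdf.Superblock;

// Sketch only: the method set below is inferred from the call sites in
// this diff, not copied from io.jhdf.storage.HdfBackingStorage itself.
public interface HdfBackingStorage {
	// Read 'length' bytes starting at 'address' into a new buffer
	ByteBuffer readBufferFromAddress(long address, int length);

	// Map a region of the file, e.g. for chunk or heap data
	ByteBuffer map(long address, long length);

	// These sizes come from the superblock and drive all header arithmetic
	int getSizeOfOffsets();

	int getSizeOfLengths();

	Superblock getSuperblock();
}
```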
diff --git a/jhdf/src/main/java/io/jhdf/CommittedDatatype.java b/jhdf/src/main/java/io/jhdf/CommittedDatatype.java
index b0f3e689..111bd854 100644
--- a/jhdf/src/main/java/io/jhdf/CommittedDatatype.java
+++ b/jhdf/src/main/java/io/jhdf/CommittedDatatype.java
@@ -19,8 +19,8 @@ public class CommittedDatatype extends AbstractNode {

-	public CommittedDatatype(HdfBackingStorage hdfFc, long address, String name, Group parent) {
-		super(hdfFc, address, name, parent);
+	public CommittedDatatype(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent) {
+		super(hdfBackingStorage, address, name, parent);
 	}

 	@Override
diff --git a/jhdf/src/main/java/io/jhdf/FractalHeap.java b/jhdf/src/main/java/io/jhdf/FractalHeap.java
index 71cedb6c..2b12b664 100644
--- a/jhdf/src/main/java/io/jhdf/FractalHeap.java
+++ b/jhdf/src/main/java/io/jhdf/FractalHeap.java
@@ -59,7 +59,7 @@ public class FractalHeap {
 	private static final BigInteger TWO = BigInteger.valueOf(2L);

 	private final long address;
-	private final HdfBackingStorage hdfFc;
+	private final HdfBackingStorage hdfBackingStorage;
 	private final Superblock sb;

 	private final int maxDirectBlockSize;
@@ -95,15 +95,15 @@ public class FractalHeap {
 	private final int bytesToStoreOffset;
 	private final int bytesToStoreLength;

-	public FractalHeap(HdfBackingStorage hdfFc, long address) {
-		this.hdfFc = hdfFc;
-		this.sb = hdfFc.getSuperblock();
+	public FractalHeap(HdfBackingStorage hdfBackingStorage, long address) {
+		this.hdfBackingStorage = hdfBackingStorage;
+		this.sb = hdfBackingStorage.getSuperblock();
 		this.address = address;

 		final int headerSize = 4 + 1 + 2 + 2 + 1 + 4 + 12 * sb.getSizeOfLengths() + 3 * sb.getSizeOfOffsets() + 2 + 2
 			+ 2 + 2 + 4;
-		ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize);
+		ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 		byte[] formatSignatureBytes = new byte[4];
 		bb.get(formatSignatureBytes, 0, formatSignatureBytes.length);
@@ -228,7 +228,7 @@ public ByteBuffer getId(ByteBuffer buffer) {
 				}

 				BTreeV2<HugeFractalHeapObjectUnfilteredRecord> hugeObjectBTree =
-					new BTreeV2<>(this.hdfFc, this.bTreeAddressOfHugeObjects);
+					new BTreeV2<>(this.hdfBackingStorage, this.bTreeAddressOfHugeObjects);

 				if (hugeObjectBTree.getRecords().size() != 1) {
 					throw new UnsupportedHdfException("Only Huge objects BTrees with 1 record are currently supported");
@@ -236,7 +236,7 @@ public ByteBuffer getId(ByteBuffer buffer) {

 				HugeFractalHeapObjectUnfilteredRecord ho = hugeObjectBTree.getRecords().get(0);

-				return this.hdfFc.readBufferFromAddress(ho.getAddress(), (int) ho.getLength());
+				return this.hdfBackingStorage.readBufferFromAddress(ho.getAddress(), (int) ho.getLength());
 			case 2: // Tiny objects
 				throw new UnsupportedHdfException("Tiny objects are currently not supported");
 			default:
@@ -252,7 +252,7 @@ private IndirectBlock(long address) {
 			final int headerSize = 4 + 1 + sb.getSizeOfOffsets() + bytesToStoreOffset
 				+ currentRowsInRootIndirectBlock * tableWidth * getRowSize() + 4;

-			ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize);
+			ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			byte[] formatSignatureBytes = new byte[4];
 			bb.get(formatSignatureBytes, 0, formatSignatureBytes.length);
@@ -319,7 +319,7 @@ private DirectBlock(long address) {
 			final int headerSize = 4 + 1 + sb.getSizeOfOffsets() + bytesToStoreOffset + 4;

-			ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize);
+			ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			byte[] formatSignatureBytes = new byte[4];
 			bb.get(formatSignatureBytes, 0, formatSignatureBytes.length);
@@ -342,7 +342,7 @@ private DirectBlock(long address) {

 			blockOffset = readBytesAsUnsignedLong(bb, bytesToStoreOffset);

-			data = hdfFc.map(address, getSizeOfDirectBlock(blockIndex));
+			data = hdfBackingStorage.map(address, getSizeOfDirectBlock(blockIndex));

 			if (checksumPresent()) {
 				int storedChecksum = bb.getInt();
diff --git a/jhdf/src/main/java/io/jhdf/GlobalHeap.java b/jhdf/src/main/java/io/jhdf/GlobalHeap.java
index 3dd9deb3..7f6d89e9 100644
--- a/jhdf/src/main/java/io/jhdf/GlobalHeap.java
+++ b/jhdf/src/main/java/io/jhdf/GlobalHeap.java
@@ -26,19 +26,19 @@ public class GlobalHeap {
 	private static final byte[] GLOBAL_HEAP_SIGNATURE = "GCOL".getBytes(StandardCharsets.US_ASCII);

-	private final HdfBackingStorage hdfFc;
+	private final HdfBackingStorage hdfBackingStorage;
 	private final long address;

 	private final Map<Integer, GlobalHeapObject> objects = new HashMap<>();

-	public GlobalHeap(HdfBackingStorage hdfFc, long address) {
-		this.hdfFc = hdfFc;
+	public GlobalHeap(HdfBackingStorage hdfBackingStorage, long address) {
+		this.hdfBackingStorage = hdfBackingStorage;
 		this.address = address;

 		try {
-			int headerSize = 4 + 1 + 3 + hdfFc.getSizeOfLengths();
+			int headerSize = 4 + 1 + 3 + hdfBackingStorage.getSizeOfLengths();

-			ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize);
+			ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			byte[] signatureBytes = new byte[4];
 			bb.get(signatureBytes, 0, signatureBytes.length);
@@ -56,12 +56,12 @@ public GlobalHeap(HdfBackingStorage hdfFc, long address) {
 			bb.position(8); // Skip past 3 reserved bytes

-			int collectionSize = readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths());
+			int collectionSize = readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths());
 			// Collection size contains size of whole collection, so subtract already read bytes
-			int remainingCollectionSize = collectionSize - 8 - hdfFc.getSizeOfLengths();
+			int remainingCollectionSize = collectionSize - 8 - hdfBackingStorage.getSizeOfLengths();
 			// Now start reading the heap into memory
-			bb = hdfFc.readBufferFromAddress(address + headerSize, remainingCollectionSize);
+			bb = hdfBackingStorage.readBufferFromAddress(address + headerSize, remainingCollectionSize);

 			// minimal global heap object is 16 bytes
 			while (bb.remaining() >= 16) {
@@ -94,10 +94,10 @@ private GlobalHeapObject(GlobalHeap globalHeap, ByteBuffer bb) {
 			index = readBytesAsUnsignedInt(bb, 2);
 			referenceCount = readBytesAsUnsignedInt(bb, 2);
 			bb.position(bb.position() + 4); // Skip 4 reserved bytes
-			int size = readBytesAsUnsignedInt(bb, globalHeap.hdfFc.getSizeOfOffsets());
+			int size = readBytesAsUnsignedInt(bb, globalHeap.hdfBackingStorage.getSizeOfOffsets());
 			if (index == 0) {
 				//the size in global heap object 0 is the free space without counting object 0
-				size = size - 2 - 2 - 4 - globalHeap.hdfFc.getSizeOfOffsets();
+				size = size - 2 - 2 - 4 - globalHeap.hdfBackingStorage.getSizeOfOffsets();
 			}
 			data = createSubBuffer(bb, size);
 			seekBufferToNextMultipleOfEight(bb);
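The parsing loop above fixes the on-disk layout of a global heap object: a 2-byte index, 2-byte reference count, 4 reserved bytes, a size field read with `getSizeOfOffsets()` bytes, then the data padded to an 8-byte boundary. A self-contained sketch of just that decode (the class name and the `sizeOfOffsets` parameter are illustrative; jhdf takes the latter from the superblock):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Minimal sketch of one global heap object, mirroring GlobalHeapObject above.
// 'sizeOfOffsets' is a plain parameter here; in jhdf it comes from the superblock.
final class GlobalHeapObjectSketch {
	final int index;          // 2 bytes
	final int referenceCount; // 2 bytes
	final byte[] data;        // 'size' bytes, padded to a multiple of 8

	GlobalHeapObjectSketch(ByteBuffer bb, int sizeOfOffsets) {
		bb.order(ByteOrder.LITTLE_ENDIAN);
		index = Short.toUnsignedInt(bb.getShort());
		referenceCount = Short.toUnsignedInt(bb.getShort());
		bb.position(bb.position() + 4); // 4 reserved bytes
		int size = (int) readUnsigned(bb, sizeOfOffsets);
		data = new byte[size];
		bb.get(data);
		// Seek to the next 8-byte boundary, as the reader above does
		bb.position((bb.position() + 7) & ~7);
	}

	private static long readUnsigned(ByteBuffer bb, int nBytes) {
		long value = 0;
		for (int i = 0; i < nBytes; i++) { // little-endian accumulate
			value |= (bb.get() & 0xFFL) << (8 * i);
		}
		return value;
	}
}
```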
diff --git a/jhdf/src/main/java/io/jhdf/GroupImpl.java b/jhdf/src/main/java/io/jhdf/GroupImpl.java
index c962e86e..d2af0b58 100644
--- a/jhdf/src/main/java/io/jhdf/GroupImpl.java
+++ b/jhdf/src/main/java/io/jhdf/GroupImpl.java
@@ -42,11 +42,11 @@ public class GroupImpl extends AbstractNode implements Group {
 	private final class ChildrenLazyInitializer extends LazyInitializer<Map<String, Node>> {
-		private final HdfBackingStorage hdfFc;
+		private final HdfBackingStorage hdfBackingStorage;
 		private final Group parent;

-		private ChildrenLazyInitializer(HdfBackingStorage hdfFc, Group parent) {
-			this.hdfFc = hdfFc;
+		private ChildrenLazyInitializer(HdfBackingStorage hdfBackingStorage, Group parent) {
+			this.hdfBackingStorage = hdfBackingStorage;
 			this.parent = parent;
 		}

@@ -74,9 +74,9 @@ private Map<String, Node> createNewStyleGroup(final ObjectHeader oh) {
 				logger.debug("Loaded group links from object header");
 			} else {
 				// Links are not stored compactly, i.e. they are in the fractal heap
-				final BTreeV2<LinkNameForIndexedGroupRecord> bTreeNode = new BTreeV2<>(hdfFc,
+				final BTreeV2<LinkNameForIndexedGroupRecord> bTreeNode = new BTreeV2<>(hdfBackingStorage,
 					linkInfoMessage.getBTreeNameIndexAddress());
-				final FractalHeap fractalHeap = new FractalHeap(hdfFc, linkInfoMessage.getFractalHeapAddress());
+				final FractalHeap fractalHeap = new FractalHeap(hdfBackingStorage, linkInfoMessage.getFractalHeapAddress());

 				List<LinkNameForIndexedGroupRecord> records = bTreeNode.getRecords();
 				links = new ArrayList<>(records.size());
@@ -84,7 +84,7 @@ private Map<String, Node> createNewStyleGroup(final ObjectHeader oh) {
 					ByteBuffer id = linkName.getId();
 					// Get the name data from the fractal heap
 					ByteBuffer bb = fractalHeap.getId(id);
-					links.add(LinkMessage.fromBuffer(bb, hdfFc.getSuperblock()));
+					links.add(LinkMessage.fromBuffer(bb, hdfBackingStorage.getSuperblock()));
 				}
 				logger.debug("Loaded group links from fractal heap");
 			}
@@ -114,15 +114,15 @@ private Map<String, Node> createNewStyleGroup(final ObjectHeader oh) {
 		private Map<String, Node> createOldStyleGroup(final ObjectHeader oh) {
 			logger.debug("Loading 'old' style group");
 			final SymbolTableMessage stm = oh.getMessageOfType(SymbolTableMessage.class);
-			final BTreeV1 rootBTreeNode = BTreeV1.createGroupBTree(hdfFc, stm.getBTreeAddress());
-			final LocalHeap rootNameHeap = new LocalHeap(hdfFc, stm.getLocalHeapAddress());
+			final BTreeV1 rootBTreeNode = BTreeV1.createGroupBTree(hdfBackingStorage, stm.getBTreeAddress());
+			final LocalHeap rootNameHeap = new LocalHeap(hdfBackingStorage, stm.getLocalHeapAddress());
 			final ByteBuffer nameBuffer = rootNameHeap.getDataBuffer();

 			final List<Long> childAddresses = rootBTreeNode.getChildAddresses();
 			final Map<String, Node> lazyChildren = new LinkedHashMap<>(childAddresses.size());

 			for (long child : childAddresses) {
-				GroupSymbolTableNode groupSTE = new GroupSymbolTableNode(hdfFc, child);
+				GroupSymbolTableNode groupSTE = new GroupSymbolTableNode(hdfBackingStorage, child);
 				for (SymbolTableEntry ste : groupSTE.getSymbolTableEntries()) {
 					String childName = readName(nameBuffer, ste.getLinkNameOffset());
 					final Node node;
@@ -132,7 +132,7 @@ private Map<String, Node> createOldStyleGroup(final ObjectHeader oh) {
 						break;
 					case 1: // Cached group
 						logger.trace("Creating group '{}'", childName);
-						node = createGroup(hdfFc, ste.getObjectHeaderAddress(), childName, parent);
+						node = createGroup(hdfBackingStorage, ste.getObjectHeaderAddress(), childName, parent);
 						break;
 					case 2: // Soft Link
 						logger.trace("Creating soft link '{}'", childName);
@@ -150,20 +150,20 @@ private Map<String, Node> createOldStyleGroup(final ObjectHeader oh) {
 		}

 		private Node createNode(String name, long address) {
-			final ObjectHeader linkHeader = ObjectHeader.readObjectHeader(hdfFc, address);
+			final ObjectHeader linkHeader = ObjectHeader.readObjectHeader(hdfBackingStorage, address);
 			final Node node;
 			if (linkHeader.hasMessageOfType(DataSpaceMessage.class)) {
 				// It's a Dataset
 				logger.trace("Creating dataset [{}]", name);
-				node = DatasetLoader.createDataset(hdfFc, linkHeader, name, parent);
+				node = DatasetLoader.createDataset(hdfBackingStorage, linkHeader, name, parent);
 			} else if (linkHeader.hasMessageOfType(DataTypeMessage.class)) {
 				// Has a datatype but no dataspace so it's a committed datatype
 				logger.trace("Creating committed data type [{}]", name);
-				node = new CommittedDatatype(hdfFc, address, name, parent);
+				node = new CommittedDatatype(hdfBackingStorage, address, name, parent);
 			} else {
 				// It's a group
 				logger.trace("Creating group [{}]", name);
-				node = createGroup(hdfFc, address, name, parent);
+				node = createGroup(hdfBackingStorage, address, name, parent);
 			}
 			return node;
 		}
@@ -178,11 +178,11 @@ private String readName(ByteBuffer bb, int linkNameOffset) {

 	private final LazyInitializer<Map<String, Node>> children;

-	private GroupImpl(HdfBackingStorage hdfFc, long address, String name, Group parent) {
-		super(hdfFc, address, name, parent);
+	private GroupImpl(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent) {
+		super(hdfBackingStorage, address, name, parent);
 		logger.trace("Creating group '{}'...", name);

-		children = new ChildrenLazyInitializer(hdfFc, this);
+		children = new ChildrenLazyInitializer(hdfBackingStorage, this);

 		logger.debug("Created group '{}'", getPath());
 	}
@@ -190,17 +190,17 @@ private GroupImpl(HdfBackingStorage hdfFc, long address, String name, Group pare
 	/**
 	 * This is a special case constructor for the root group.
 	 *
-	 * @param hdfFc               The file channel for reading the file
+	 * @param hdfBackingStorage   The backing storage for reading the file
 	 * @param objectHeaderAddress The offset into the file of the object header for
 	 *                            this group
 	 * @param parent              For the root group the parent is the file itself.
 	 */
-	private GroupImpl(HdfBackingStorage hdfFc, long objectHeaderAddress, HdfFile parent) {
-		super(hdfFc, objectHeaderAddress, "", parent); // No name special case for root group no name
+	private GroupImpl(HdfBackingStorage hdfBackingStorage, long objectHeaderAddress, HdfFile parent) {
+		super(hdfBackingStorage, objectHeaderAddress, "", parent); // No name special case for root group no name
 		logger.trace("Creating root group...");

 		// Special case for root group pass parent instead of this
-		children = new ChildrenLazyInitializer(hdfFc, parent);
+		children = new ChildrenLazyInitializer(hdfBackingStorage, parent);

 		logger.debug("Created root group of file '{}'", parent.getName());
 	}
@@ -209,21 +209,21 @@ private GroupImpl(HdfBackingStorage hdfFc, long objectHeaderAddress, HdfFile par
 	 * Creates a group for the specified object header with the given name by
 	 * reading from the file channel.
 	 *
-	 * @param hdfFc               The file channel for reading the file
+	 * @param hdfBackingStorage   The backing storage for reading the file
 	 * @param objectHeaderAddress The offset into the file of the object header for
 	 *                            this group
 	 * @param name                The name of this group
 	 * @param parent              For the root group the parent is the file itself.
 	 * @return The newly read group
 	 */
-	/* package */ static Group createGroup(HdfBackingStorage hdfFc, long objectHeaderAddress, String name,
+	/* package */ static Group createGroup(HdfBackingStorage hdfBackingStorage, long objectHeaderAddress, String name,
 		Group parent) {
-		return new GroupImpl(hdfFc, objectHeaderAddress, name, parent);
+		return new GroupImpl(hdfBackingStorage, objectHeaderAddress, name, parent);
 	}

-	/* package */ static Group createRootGroup(HdfBackingStorage hdfFc, long objectHeaderAddress, HdfFile file) {
+	/* package */ static Group createRootGroup(HdfBackingStorage hdfBackingStorage, long objectHeaderAddress, HdfFile file) {
 		// Call the special root group constructor
-		return new GroupImpl(hdfFc, objectHeaderAddress, file);
+		return new GroupImpl(hdfBackingStorage, objectHeaderAddress, file);
 	}

 	@Override
diff --git a/jhdf/src/main/java/io/jhdf/GroupSymbolTableNode.java b/jhdf/src/main/java/io/jhdf/GroupSymbolTableNode.java
index cb955140..ffbb7a16 100644
--- a/jhdf/src/main/java/io/jhdf/GroupSymbolTableNode.java
+++ b/jhdf/src/main/java/io/jhdf/GroupSymbolTableNode.java
@@ -32,11 +32,11 @@ public class GroupSymbolTableNode {
 	private final short numberOfEntries;
 	private final SymbolTableEntry[] symbolTableEntries;

-	public GroupSymbolTableNode(HdfBackingStorage hdfFc, long address) {
+	public GroupSymbolTableNode(HdfBackingStorage hdfBackingStorage, long address) {
 		this.address = address;
 		try {
 			int headerSize = 8;
-			ByteBuffer header = hdfFc.readBufferFromAddress(address, headerSize);
+			ByteBuffer header = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			byte[] formatSignatureBytes = new byte[4];
 			header.get(formatSignatureBytes, 0, formatSignatureBytes.length);
@@ -59,12 +59,12 @@ public GroupSymbolTableNode(HdfBackingStorage hdfFc, long address) {
 			numberOfEntries = ByteBuffer.wrap(twoBytes).order(LITTLE_ENDIAN).getShort();
 			logger.trace("numberOfSymbols = {}", numberOfEntries);

-			final long symbolTableEntryBytes = hdfFc.getSizeOfOffsets() * 2L + 8L + 16L;
+			final long symbolTableEntryBytes = hdfBackingStorage.getSizeOfOffsets() * 2L + 8L + 16L;

 			symbolTableEntries = new SymbolTableEntry[numberOfEntries];
 			for (int i = 0; i < numberOfEntries; i++) {
 				long offset = address + headerSize + i * symbolTableEntryBytes;
-				symbolTableEntries[i] = new SymbolTableEntry(hdfFc, offset);
+				symbolTableEntries[i] = new SymbolTableEntry(hdfBackingStorage, offset);
 			}
 		} catch (Exception e) {
 			// TODO improve message
diff --git a/jhdf/src/main/java/io/jhdf/LocalHeap.java b/jhdf/src/main/java/io/jhdf/LocalHeap.java
index 29f0f636..94da0cf6 100644
--- a/jhdf/src/main/java/io/jhdf/LocalHeap.java
+++ b/jhdf/src/main/java/io/jhdf/LocalHeap.java
@@ -31,12 +31,12 @@ public class LocalHeap {
 	private final long addressOfDataSegment;
 	private final ByteBuffer dataBuffer;

-	public LocalHeap(HdfBackingStorage hdfFc, long address) {
+	public LocalHeap(HdfBackingStorage hdfBackingStorage, long address) {
 		this.address = address;
 		try {
 			// Header
-			int headerSize = 8 + hdfFc.getSizeOfLengths() + hdfFc.getSizeOfLengths() + hdfFc.getSizeOfOffsets();
-			ByteBuffer header = hdfFc.readBufferFromAddress(address, headerSize);
+			int headerSize = 8 + hdfBackingStorage.getSizeOfLengths() + hdfBackingStorage.getSizeOfLengths() + hdfBackingStorage.getSizeOfOffsets();
+			ByteBuffer header = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			byte[] formatSignatureBytes = new byte[4];
 			header.get(formatSignatureBytes, 0, formatSignatureBytes.length);
@@ -53,18 +53,18 @@ public LocalHeap(HdfBackingStorage hdfFc, long address) {
 			header.position(8);

 			// Data Segment Size
-			dataSegmentSize = Utils.readBytesAsUnsignedLong(header, hdfFc.getSizeOfLengths());
+			dataSegmentSize = Utils.readBytesAsUnsignedLong(header, hdfBackingStorage.getSizeOfLengths());
 			logger.trace("dataSegmentSize = {}", dataSegmentSize);

 			// Offset to Head of Free-list
-			offsetToHeadOfFreeList = Utils.readBytesAsUnsignedLong(header, hdfFc.getSizeOfLengths());
+			offsetToHeadOfFreeList = Utils.readBytesAsUnsignedLong(header, hdfBackingStorage.getSizeOfLengths());
 			logger.trace("offsetToHeadOfFreeList = {}", offsetToHeadOfFreeList);

 			// Address of Data Segment
-			addressOfDataSegment = Utils.readBytesAsUnsignedLong(header, hdfFc.getSizeOfOffsets());
+			addressOfDataSegment = Utils.readBytesAsUnsignedLong(header, hdfBackingStorage.getSizeOfOffsets());
 			logger.trace("addressOfDataSegment = {}", addressOfDataSegment);

-			dataBuffer = hdfFc.map(addressOfDataSegment, dataSegmentSize);
+			dataBuffer = hdfBackingStorage.map(addressOfDataSegment, dataSegmentSize);
 		} catch (Exception e) {
 			throw new HdfException("Error reading local heap", e);
 		}
diff --git a/jhdf/src/main/java/io/jhdf/ObjectHeader.java b/jhdf/src/main/java/io/jhdf/ObjectHeader.java
index a4e037b4..6cb69644 100644
--- a/jhdf/src/main/java/io/jhdf/ObjectHeader.java
+++ b/jhdf/src/main/java/io/jhdf/ObjectHeader.java
@@ -82,11 +82,11 @@ public static class ObjectHeaderV1 extends ObjectHeader {
 		/** Level of the node 0 = leaf */
 		private final int referenceCount;

-		private ObjectHeaderV1(HdfBackingStorage hdfFc, long address) {
+		private ObjectHeaderV1(HdfBackingStorage hdfBackingStorage, long address) {
 			super(address);

 			try {
-				ByteBuffer header = hdfFc.readBufferFromAddress(address, 12);
+				ByteBuffer header = hdfBackingStorage.readBufferFromAddress(address, 12);

 				// Version
 				version = header.get();
@@ -108,9 +108,9 @@ private ObjectHeaderV1(HdfBackingStorage hdfFc, long address) {
 				// 12 up to this point + 4 missed in format spec = 16
 				address += 16;
-				header = hdfFc.readBufferFromAddress(address, headerSize);
+				header = hdfBackingStorage.readBufferFromAddress(address, headerSize);

-				readMessages(hdfFc, header, numberOfMessages);
+				readMessages(hdfBackingStorage, header, numberOfMessages);

 				logger.debug("Read object header from address: {}", address);

@@ -183,12 +183,12 @@ public static class ObjectHeaderV2 extends ObjectHeader {
 		private final int maximumNumberOfDenseAttributes;
 		private final BitSet flags;

-		private ObjectHeaderV2(HdfBackingStorage hdfFc, long address) {
+		private ObjectHeaderV2(HdfBackingStorage hdfBackingStorage, long address) {
 			super(address);

 			int headerSize = 0; // Keep track of the size for checksum
 			try {
-				ByteBuffer bb = hdfFc.readBufferFromAddress(address, 6);
+				ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, 6);
 				address += 6;
 				headerSize += 6;
@@ -228,7 +228,7 @@ private ObjectHeaderV2(HdfBackingStorage hdfFc, long address) {

 				// Timestamps
 				if (flags.get(TIMESTAMPS_PRESENT)) {
-					bb = hdfFc.readBufferFromAddress(address, 16);
+					bb = hdfBackingStorage.readBufferFromAddress(address, 16);
 					address += 16;
 					headerSize += 16;
@@ -245,7 +245,7 @@ private ObjectHeaderV2(HdfBackingStorage hdfFc, long address) {

 				// Number of attributes
 				if (flags.get(NUMBER_OF_ATTRIBUTES_PRESENT)) {
-					bb = hdfFc.readBufferFromAddress(address, 4);
+					bb = hdfBackingStorage.readBufferFromAddress(address, 4);
 					address += 4;
 					headerSize += 4;
@@ -256,22 +256,22 @@ private ObjectHeaderV2(HdfBackingStorage hdfFc, long address) {
 					maximumNumberOfDenseAttributes = -1;
 				}
-				bb = hdfFc.readBufferFromAddress(address, sizeOfChunk0);
+				bb = hdfBackingStorage.readBufferFromAddress(address, sizeOfChunk0);
 				address += sizeOfChunk0;
 				headerSize += sizeOfChunk0;
 				int sizeOfMessages = readBytesAsUnsignedInt(bb, sizeOfChunk0);

-				bb = hdfFc.readBufferFromAddress(address, sizeOfMessages);
+				bb = hdfBackingStorage.readBufferFromAddress(address, sizeOfMessages);
 				headerSize += sizeOfMessages;

 				// There might be a gap at the end of the header of up to 4 bytes
 				// message type (1 byte) + message size (2 bytes) + message flags (1 byte)
-				readMessages(hdfFc, bb);
+				readMessages(hdfBackingStorage, bb);

 				// Checksum
 				headerSize += 4;
-				ByteBuffer fullHeaderBuffer = hdfFc.readBufferFromAddress(super.getAddress(), headerSize);
+				ByteBuffer fullHeaderBuffer = hdfBackingStorage.readBufferFromAddress(super.getAddress(), headerSize);
 				ChecksumUtils.validateChecksum(fullHeaderBuffer);

 				logger.debug("Read object header from address: {}", address);
@@ -348,24 +348,24 @@ public boolean isAttributeCreationOrderIndexed() {
 	}

-	public static ObjectHeader readObjectHeader(HdfBackingStorage hdfFc, long address) {
-		ByteBuffer bb = hdfFc.readBufferFromAddress(address, 1);
+	public static ObjectHeader readObjectHeader(HdfBackingStorage hdfBackingStorage, long address) {
+		ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, 1);
 		byte version = bb.get();
 		if (version == 1) {
-			return new ObjectHeaderV1(hdfFc, address);
+			return new ObjectHeaderV1(hdfBackingStorage, address);
 		} else {
-			return new ObjectHeaderV2(hdfFc, address);
+			return new ObjectHeaderV2(hdfBackingStorage, address);
 		}
 	}

-	public static LazyInitializer<ObjectHeader> lazyReadObjectHeader(HdfBackingStorage hdfFc, long address) {
+	public static LazyInitializer<ObjectHeader> lazyReadObjectHeader(HdfBackingStorage hdfBackingStorage, long address) {
 		logger.debug("Creating lazy object header at address: {}", address);
 		return new LazyInitializer<ObjectHeader>() {
 			@Override
 			protected ObjectHeader initialize() {
 				logger.debug("Lazy initializing object header at address: {}", address);
-				return readObjectHeader(hdfFc, address);
+				return readObjectHeader(hdfBackingStorage, address);
 			}
 		};
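`lazyReadObjectHeader` wraps the parse in a Commons Lang `LazyInitializer`, so an object header is only read from storage the first time `get()` is called, and the result is cached. The same pattern in isolation (the `String` payload stands in for a parsed `ObjectHeader`):

```java
import org.apache.commons.lang3.concurrent.ConcurrentException;
import org.apache.commons.lang3.concurrent.LazyInitializer;

// Same pattern as ObjectHeader.lazyReadObjectHeader: defer the expensive
// read until get() is first called, then cache the result.
public class LazyHeaderDemo {
	public static void main(String[] args) throws ConcurrentException {
		LazyInitializer<String> lazyHeader = new LazyInitializer<String>() {
			@Override
			protected String initialize() {
				System.out.println("Parsing header now...");
				return "header@0x30"; // stand-in for a parsed ObjectHeader
			}
		};
		System.out.println("Initializer created, nothing read yet");
		System.out.println(lazyHeader.get()); // triggers initialize()
		System.out.println(lazyHeader.get()); // cached, no second parse
	}
}
```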
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
index b256557e..f090a584 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
@@ -43,8 +43,8 @@ public abstract class BTreeV1 {
 	private final long leftSiblingAddress;
 	private final long rightSiblingAddress;

-	public static BTreeV1Group createGroupBTree(HdfBackingStorage hdfFc, long address) {
-		ByteBuffer header = readHeaderAndValidateSignature(hdfFc, address);
+	public static BTreeV1Group createGroupBTree(HdfBackingStorage hdfBackingStorage, long address) {
+		ByteBuffer header = readHeaderAndValidateSignature(hdfBackingStorage, address);

 		final byte nodeType = header.get();
 		if (nodeType != 0) {
@@ -54,15 +54,15 @@ public static BTreeV1Group createGroupBTree(HdfBackingStorage hdfFc, long addres
 		final byte nodeLevel = header.get();

 		if (nodeLevel > 0) {
-			return new BTreeV1Group.BTreeV1GroupNonLeafNode(hdfFc, address);
+			return new BTreeV1Group.BTreeV1GroupNonLeafNode(hdfBackingStorage, address);
 		} else {
-			return new BTreeV1Group.BTreeV1GroupLeafNode(hdfFc, address);
+			return new BTreeV1Group.BTreeV1GroupLeafNode(hdfBackingStorage, address);
 		}
 	}

-	public static BTreeV1Data createDataBTree(HdfBackingStorage hdfFc, long address, int dataDimensions) {
-		ByteBuffer header = readHeaderAndValidateSignature(hdfFc, address);
+	public static BTreeV1Data createDataBTree(HdfBackingStorage hdfBackingStorage, long address, int dataDimensions) {
+		ByteBuffer header = readHeaderAndValidateSignature(hdfBackingStorage, address);

 		final byte nodeType = header.get();
 		if (nodeType != 1) {
@@ -72,9 +72,9 @@ public static BTreeV1Data createDataBTree(HdfBackingStorage hdfFc, long address,
 		final byte nodeLevel = header.get();

 		if (nodeLevel > 0) {
-			return new BTreeV1Data.BTreeV1DataNonLeafNode(hdfFc, address, dataDimensions);
+			return new BTreeV1Data.BTreeV1DataNonLeafNode(hdfBackingStorage, address, dataDimensions);
 		} else {
-			return new BTreeV1Data.BTreeV1DataLeafNode(hdfFc, address, dataDimensions);
+			return new BTreeV1Data.BTreeV1DataLeafNode(hdfBackingStorage, address, dataDimensions);
 		}
 	}
@@ -90,19 +90,19 @@ public static ByteBuffer readHeaderAndValidateSignature(HdfBackingStorage fc, lo
 		return header;
 	}

-	/* package */ BTreeV1(HdfBackingStorage hdfFc, long address) {
+	/* package */ BTreeV1(HdfBackingStorage hdfBackingStorage, long address) {
 		this.address = address;

-		int headerSize = 8 * hdfFc.getSizeOfOffsets();
-		ByteBuffer header = hdfFc.readBufferFromAddress(address + 6, headerSize);
+		int headerSize = 8 * hdfBackingStorage.getSizeOfOffsets();
+		ByteBuffer header = hdfBackingStorage.readBufferFromAddress(address + 6, headerSize);

 		entriesUsed = Utils.readBytesAsUnsignedInt(header, 2);
 		logger.trace("Entries = {}", entriesUsed);

-		leftSiblingAddress = Utils.readBytesAsUnsignedLong(header, hdfFc.getSizeOfOffsets());
+		leftSiblingAddress = Utils.readBytesAsUnsignedLong(header, hdfBackingStorage.getSizeOfOffsets());
 		logger.trace("left address = {}", leftSiblingAddress);

-		rightSiblingAddress = Utils.readBytesAsUnsignedLong(header, hdfFc.getSizeOfOffsets());
+		rightSiblingAddress = Utils.readBytesAsUnsignedLong(header, hdfBackingStorage.getSizeOfOffsets());
 		logger.trace("right address = {}", rightSiblingAddress);
 	}
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
index 94e05187..68399ba2 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
@@ -30,8 +30,8 @@
  */
 public abstract class BTreeV1Data extends BTreeV1 {

-	private BTreeV1Data(HdfBackingStorage hdfFc, long address) {
-		super(hdfFc, address);
+	private BTreeV1Data(HdfBackingStorage hdfBackingStorage, long address) {
+		super(hdfBackingStorage, address);
 	}

 	/**
@@ -43,20 +43,20 @@ private BTreeV1Data(HdfBackingStorage hdfFc, long address) {

 		private final ArrayList<Chunk> chunks;

-		/* package */ BTreeV1DataLeafNode(HdfBackingStorage hdfFc, long address, int dataDimensions) {
-			super(hdfFc, address);
+		/* package */ BTreeV1DataLeafNode(HdfBackingStorage hdfBackingStorage, long address, int dataDimensions) {
+			super(hdfBackingStorage, address);

 			final int keySize = 4 + 4 + (dataDimensions + 1) * 8;
 			final int keyBytes = (entriesUsed + 1) * keySize;
-			final int childPointerBytes = entriesUsed * hdfFc.getSizeOfOffsets();
+			final int childPointerBytes = entriesUsed * hdfBackingStorage.getSizeOfOffsets();
 			final int keysAndPointersBytes = keyBytes + childPointerBytes;

-			final long keysAddress = address + 8L + 2L * hdfFc.getSizeOfOffsets();
-			final ByteBuffer bb = hdfFc.readBufferFromAddress(keysAddress, keysAndPointersBytes);
+			final long keysAddress = address + 8L + 2L * hdfBackingStorage.getSizeOfOffsets();
+			final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(keysAddress, keysAndPointersBytes);

 			chunks = new ArrayList<>(entriesUsed);
 			for (int i = 0; i < entriesUsed; i++) {
-				Chunk chunk = readKeyAsChunk(hdfFc.getSuperblock(), dataDimensions, bb);
+				Chunk chunk = readKeyAsChunk(hdfBackingStorage.getSuperblock(), dataDimensions, bb);
 				chunks.add(chunk);
 			}

@@ -94,23 +94,23 @@ public List<Chunk> getChunks() {

 		private final List<BTreeV1Data> childNodes;

-		/* package */ BTreeV1DataNonLeafNode(HdfBackingStorage hdfFc, long address, int dataDimensions) {
-			super(hdfFc, address);
+		/* package */ BTreeV1DataNonLeafNode(HdfBackingStorage hdfBackingStorage, long address, int dataDimensions) {
+			super(hdfBackingStorage, address);

 			final int keySize = 4 + 4 + (dataDimensions + 1) * 8;
 			final int keyBytes = (entriesUsed + 1) * keySize;
-			final int childPointerBytes = entriesUsed * hdfFc.getSizeOfOffsets();
+			final int childPointerBytes = entriesUsed * hdfBackingStorage.getSizeOfOffsets();
 			final int keysAndPointersBytes = keyBytes + childPointerBytes;

-			final long keysAddress = address + 8L + 2L * hdfFc.getSizeOfOffsets();
-			final ByteBuffer keysAndPointersBuffer = hdfFc.readBufferFromAddress(keysAddress, keysAndPointersBytes);
+			final long keysAddress = address + 8L + 2L * hdfBackingStorage.getSizeOfOffsets();
+			final ByteBuffer keysAndPointersBuffer = hdfBackingStorage.readBufferFromAddress(keysAddress, keysAndPointersBytes);

 			childNodes = new ArrayList<>(entriesUsed);
 			for (int i = 0; i < entriesUsed; i++) {
 				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + keySize);
-				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfFc.getSizeOfOffsets());
-				childNodes.add(BTreeV1.createDataBTree(hdfFc, childAddress, dataDimensions));
+				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfBackingStorage.getSizeOfOffsets());
+				childNodes.add(BTreeV1.createDataBTree(hdfBackingStorage, childAddress, dataDimensions));
 			}
 		}
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Group.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Group.java
index e782564d..e5e32293 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Group.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Group.java
@@ -23,29 +23,29 @@
 */
 public abstract class BTreeV1Group extends BTreeV1 {

-	private BTreeV1Group(HdfBackingStorage hdfFc, long address) {
-		super(hdfFc, address);
+	private BTreeV1Group(HdfBackingStorage hdfBackingStorage, long address) {
+		super(hdfBackingStorage, address);
 	}

 	/* package */ static class BTreeV1GroupLeafNode extends BTreeV1Group {

 		private final List<Long> childAddresses;

-		/* package */ BTreeV1GroupLeafNode(HdfBackingStorage hdfFc, long address) {
-			super(hdfFc, address);
+		/* package */ BTreeV1GroupLeafNode(HdfBackingStorage hdfBackingStorage, long address) {
+			super(hdfBackingStorage, address);

-			final int keyBytes = (2 * entriesUsed + 1) * hdfFc.getSizeOfLengths();
-			final int childPointerBytes = (2 * entriesUsed) * hdfFc.getSizeOfOffsets();
+			final int keyBytes = (2 * entriesUsed + 1) * hdfBackingStorage.getSizeOfLengths();
+			final int childPointerBytes = (2 * entriesUsed) * hdfBackingStorage.getSizeOfOffsets();
 			final int keysAndPointersBytes = keyBytes + childPointerBytes;

-			final long keysAddress = address + 8L + 2L * hdfFc.getSizeOfOffsets();
-			final ByteBuffer keysAndPointersBuffer = hdfFc.readBufferFromAddress(keysAddress, keysAndPointersBytes);
+			final long keysAddress = address + 8L + 2L * hdfBackingStorage.getSizeOfOffsets();
+			final ByteBuffer keysAndPointersBuffer = hdfBackingStorage.readBufferFromAddress(keysAddress, keysAndPointersBytes);

 			childAddresses = new ArrayList<>(entriesUsed);

 			for (int i = 0; i < entriesUsed; i++) {
-				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + hdfFc.getSizeOfLengths());
-				childAddresses.add(Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfFc.getSizeOfOffsets()));
+				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + hdfBackingStorage.getSizeOfLengths());
+				childAddresses.add(Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfBackingStorage.getSizeOfOffsets()));
 			}
 		}
@@ -60,22 +60,22 @@ public List<Long> getChildAddresses() {

 		private final List<BTreeV1Group> childNodes;

-		/* package */ BTreeV1GroupNonLeafNode(HdfBackingStorage hdfFc, long address) {
-			super(hdfFc, address);
+		/* package */ BTreeV1GroupNonLeafNode(HdfBackingStorage hdfBackingStorage, long address) {
+			super(hdfBackingStorage, address);

-			final int keyBytes = (2 * entriesUsed + 1) * hdfFc.getSizeOfLengths();
-			final int childPointerBytes = (2 * entriesUsed) * hdfFc.getSizeOfOffsets();
+			final int keyBytes = (2 * entriesUsed + 1) * hdfBackingStorage.getSizeOfLengths();
+			final int childPointerBytes = (2 * entriesUsed) * hdfBackingStorage.getSizeOfOffsets();
 			final int keysAndPointersBytes = keyBytes + childPointerBytes;

-			final long keysAddress = address + 8L + 2L * hdfFc.getSizeOfOffsets();
-			final ByteBuffer keysAndPointersBuffer = hdfFc.readBufferFromAddress(keysAddress, keysAndPointersBytes);
+			final long keysAddress = address + 8L + 2L * hdfBackingStorage.getSizeOfOffsets();
+			final ByteBuffer keysAndPointersBuffer = hdfBackingStorage.readBufferFromAddress(keysAddress, keysAndPointersBytes);

 			childNodes = new ArrayList<>(entriesUsed);

 			for (int i = 0; i < entriesUsed; i++) {
-				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + hdfFc.getSizeOfOffsets());
-				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfFc.getSizeOfOffsets());
-				childNodes.add(createGroupBTree(hdfFc, childAddress));
+				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + hdfBackingStorage.getSizeOfOffsets());
+				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfBackingStorage.getSizeOfOffsets());
+				childNodes.add(createGroupBTree(hdfBackingStorage, childAddress));
 			}
 		}
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV2.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV2.java
index c9fb8d55..9c5e9997 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV2.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV2.java
@@ -51,16 +51,16 @@ public List<V> getRecords() {
 		return records;
 	}

-	public BTreeV2(HdfBackingStorage hdfFc, long address) {
-		this(hdfFc, address, null);
+	public BTreeV2(HdfBackingStorage hdfBackingStorage, long address) {
+		this(hdfBackingStorage, address, null);
 	}

-	public BTreeV2(HdfBackingStorage hdfFc, long address, DatasetInfo datasetInfo) {
+	public BTreeV2(HdfBackingStorage hdfBackingStorage, long address, DatasetInfo datasetInfo) {
 		this.address = address;
 		try {
 			// B Tree V2 Header
-			int headerSize = 16 + hdfFc.getSizeOfOffsets() + 2 + hdfFc.getSizeOfLengths() + 4;
-			ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize);
+			int headerSize = 16 + hdfBackingStorage.getSizeOfOffsets() + 2 + hdfBackingStorage.getSizeOfLengths() + 4;
+			ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize);

 			// Verify signature
 			byte[] formatSignatureBytes = new byte[4];
@@ -86,17 +86,17 @@ public BTreeV2(HdfBackingStorage hdfFc, long address, DatasetInfo datasetInfo) {
 			final int splitPercent = Utils.readBytesAsUnsignedInt(bb, 1);
 			final int mergePercent = Utils.readBytesAsUnsignedInt(bb, 1);

-			final long rootNodeAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets());
+			final long rootNodeAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets());
 			final int numberOfRecordsInRoot = Utils.readBytesAsUnsignedInt(bb, 2);
-			final int totalNumberOfRecordsInTree = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths());
+			final int totalNumberOfRecordsInTree = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths());

 			bb.rewind();
 			ChecksumUtils.validateChecksum(bb);

 			records = new ArrayList<>(totalNumberOfRecordsInTree);

-			readRecords(hdfFc, rootNodeAddress, depth, numberOfRecordsInRoot, totalNumberOfRecordsInTree, datasetInfo);
+			readRecords(hdfBackingStorage, rootNodeAddress, depth, numberOfRecordsInRoot, totalNumberOfRecordsInTree, datasetInfo);

 		} catch (HdfException e) {
 			throw new HdfException("Error reading B Tree node", e);
@@ -104,9 +104,9 @@ public BTreeV2(HdfBackingStorage hdfFc, long address, DatasetInfo datasetInfo) {

 	}

-	private void readRecords(HdfBackingStorage hdfFc, long address, int depth, int numberOfRecords, int totalRecords, DatasetInfo datasetInfo) {
+	private void readRecords(HdfBackingStorage hdfBackingStorage, long address, int depth, int numberOfRecords, int totalRecords, DatasetInfo datasetInfo) {

-		ByteBuffer bb = hdfFc.readBufferFromAddress(address, nodeSize);
+		ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, nodeSize);

 		byte[] nodeSignatureBytes = new byte[4];
 		bb.get(nodeSignatureBytes, 0, nodeSignatureBytes.length);
@@ -133,9 +133,9 @@ private void readRecords(HdfBackingStorage hdfFc, long address, int depth, int n

 		if (!leafNode) {
 			for (int i = 0; i < numberOfRecords + 1; i++) {
-				final long childAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets());
+				final long childAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets());
 				int sizeOfNumberOfRecords = getSizeOfNumberOfRecords(nodeSize, depth, totalRecords, recordSize,
-					hdfFc.getSizeOfOffsets());
+					hdfBackingStorage.getSizeOfOffsets());
 				final int numberOfChildRecords = readBytesAsUnsignedInt(bb, sizeOfNumberOfRecords);
 				final int totalNumberOfChildRecords;
 				if (depth > 1) {
@@ -144,7 +144,7 @@ private void readRecords(HdfBackingStorage hdfFc, long address, int depth, int n
 				} else {
 					totalNumberOfChildRecords = -1;
 				}
-				readRecords(hdfFc, childAddress, depth - 1, numberOfChildRecords, totalNumberOfChildRecords, datasetInfo);
+				readRecords(hdfBackingStorage, childAddress, depth - 1, numberOfChildRecords, totalNumberOfChildRecords, datasetInfo);
 			}
 		}
 		bb.limit(bb.position() + 4);
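Every `BTreeV2` consumer in this diff follows the same pattern: construct it over the backing storage and a root address, then drain `getRecords()`. A hedged sketch of that pattern assembled only from call sites shown above (the import paths and the helper's name are assumptions):

```java
import java.nio.ByteBuffer;

import io.jhdf.FractalHeap;
import io.jhdf.btree.BTreeV2;
import io.jhdf.btree.record.AttributeNameForIndexedAttributesRecord;
import io.jhdf.storage.HdfBackingStorage;

// Sketch of the consumption pattern used throughout this diff: build the
// btree over the backing storage, then drain getRecords(). The addresses
// are parameters here; in jhdf they come from header messages.
final class BTreeV2UsageSketch {
	static void readAttributeNames(HdfBackingStorage hdfBackingStorage, long btreeAddress, long heapAddress) {
		FractalHeap fractalHeap = new FractalHeap(hdfBackingStorage, heapAddress);
		BTreeV2<AttributeNameForIndexedAttributesRecord> btree = new BTreeV2<>(hdfBackingStorage, btreeAddress);
		for (AttributeNameForIndexedAttributesRecord record : btree.getRecords()) {
			// Each record points into the fractal heap via its heap ID
			ByteBuffer nameData = fractalHeap.getId(record.getHeapId());
			// ... decode the attribute message from nameData, as AbstractNode does
		}
	}
}
```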
-25,7 +25,7 @@ private CompoundDatasetReader() {
 		throw new AssertionError("No instances of CompoundDatasetReader");
 	}

-	public static Map<String, Object> readDataset(CompoundDataType type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) {
+	public static Map<String, Object> readDataset(CompoundDataType type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) {
 		final int sizeAsInt = Arrays.stream(dimensions).reduce(1, Math::multiplyExact);

 		final List<CompoundDataMember> members = type.getMembers();
@@ -46,7 +46,7 @@ public static Map<String, Object> readDataset(CompoundDataType type, ByteBuffer

 			// Now read this member
 			memberBuffer.rewind();
-			final Object memberData = DatasetReader.readDataset(member.getDataType(), memberBuffer, dimensions, hdfFc);
+			final Object memberData = DatasetReader.readDataset(member.getDataType(), memberBuffer, dimensions, hdfBackingStorage);

 			data.put(member.getName(), memberData);
 		}
diff --git a/jhdf/src/main/java/io/jhdf/dataset/ContiguousDatasetImpl.java b/jhdf/src/main/java/io/jhdf/dataset/ContiguousDatasetImpl.java
index 34470d72..4c8e50d9 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/ContiguousDatasetImpl.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/ContiguousDatasetImpl.java
@@ -24,15 +24,15 @@ public class ContiguousDatasetImpl extends DatasetBase implements ContiguousData

 	final ContiguousDataLayoutMessage contiguousDataLayoutMessage;

-	public ContiguousDatasetImpl(HdfBackingStorage hdfFc, long address, String name, Group parent, ObjectHeader oh) {
-		super(hdfFc, address, name, parent, oh);
+	public ContiguousDatasetImpl(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent, ObjectHeader oh) {
+		super(hdfBackingStorage, address, name, parent, oh);
 		this.contiguousDataLayoutMessage = getHeaderMessage(ContiguousDataLayoutMessage.class);
 	}

 	@Override
 	public ByteBuffer getDataBuffer() {
 		try {
-			ByteBuffer data = hdfFc.map(contiguousDataLayoutMessage.getAddress(), getSizeInBytes());
+			ByteBuffer data = hdfBackingStorage.map(contiguousDataLayoutMessage.getAddress(), getSizeInBytes());
 			convertToCorrectEndiness(data);
 			return data;
 		} catch (Exception e) {
diff --git a/jhdf/src/main/java/io/jhdf/dataset/DatasetBase.java b/jhdf/src/main/java/io/jhdf/dataset/DatasetBase.java
index d4bc213d..1c25c061 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/DatasetBase.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/DatasetBase.java
@@ -37,15 +37,15 @@ public abstract class DatasetBase extends AbstractNode implements Dataset {

 	private static final Logger logger = LoggerFactory.getLogger(DatasetBase.class);

-	protected final HdfBackingStorage hdfFc;
+	protected final HdfBackingStorage hdfBackingStorage;
 	protected final ObjectHeader oh;

 	private final DataType dataType;
 	private final DataSpace dataSpace;

-	public DatasetBase(HdfBackingStorage hdfFc, long address, String name, Group parent, ObjectHeader oh) {
-		super(hdfFc, address, name, parent);
-		this.hdfFc = hdfFc;
+	public DatasetBase(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent, ObjectHeader oh) {
+		super(hdfBackingStorage, address, name, parent);
+		this.hdfBackingStorage = hdfBackingStorage;
 		this.oh = oh;

 		dataType = getHeaderMessage(DataTypeMessage.class).getDataType();
@@ -125,7 +125,7 @@ public Object getData() {
 		final ByteBuffer bb = getDataBuffer();
 		final DataType type = getDataType();

-		return DatasetReader.readDataset(type, bb, getDimensions(), hdfFc);
+		return DatasetReader.readDataset(type, bb, getDimensions(), hdfBackingStorage);
 	}

 	@Override
@@ -155,7 +155,7 @@ public Object getFillValue() {
 		if (fillValueMessage.isFillValueDefined()) {
 			ByteBuffer bb = fillValueMessage.getFillValue();
 			// Convert to data, passing zero-length dims for scalar
-			return DatasetReader.readDataset(getDataType(), bb, new int[0], hdfFc);
+			return DatasetReader.readDataset(getDataType(), bb, new int[0], hdfBackingStorage);
 		} else {
 			return null;
 		}
diff --git a/jhdf/src/main/java/io/jhdf/dataset/DatasetLoader.java b/jhdf/src/main/java/io/jhdf/dataset/DatasetLoader.java
index bc5dea85..8819cfd4 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/DatasetLoader.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/DatasetLoader.java
@@ -28,7 +28,7 @@ private DatasetLoader() {
 		throw new AssertionError("No instances of DatasetLoader");
 	}

-	public static Dataset createDataset(HdfBackingStorage hdfFc, ObjectHeader oh, String name,
+	public static Dataset createDataset(HdfBackingStorage hdfBackingStorage, ObjectHeader oh, String name,
 										Group parent) {
 		final long address = oh.getAddress();
@@ -37,16 +37,16 @@ public static Dataset createDataset(HdfBackingStorage hdfFc, ObjectHeader oh, St
 			final DataLayoutMessage dlm = oh.getMessageOfType(DataLayoutMessage.class);

 			if (dlm instanceof CompactDataLayoutMessage) {
-				return new CompactDataset(hdfFc, address, name, parent, oh);
+				return new CompactDataset(hdfBackingStorage, address, name, parent, oh);

 			} else if (dlm instanceof ContiguousDataLayoutMessage) {
-				return new ContiguousDatasetImpl(hdfFc, address, name, parent, oh);
+				return new ContiguousDatasetImpl(hdfBackingStorage, address, name, parent, oh);

 			} else if (dlm instanceof ChunkedDataLayoutMessage) {
-				return new ChunkedDatasetV3(hdfFc, address, name, parent, oh);
+				return new ChunkedDatasetV3(hdfBackingStorage, address, name, parent, oh);

 			} else if (dlm instanceof ChunkedDataLayoutMessageV4) {
-				return new ChunkedDatasetV4(hdfFc, address, name, parent, oh);
+				return new ChunkedDatasetV4(hdfBackingStorage, address, name, parent, oh);

 			} else {
 				throw new HdfException("Unrecognized Dataset layout type: " + dlm.getClass().getCanonicalName());
diff --git a/jhdf/src/main/java/io/jhdf/dataset/DatasetReader.java b/jhdf/src/main/java/io/jhdf/dataset/DatasetReader.java
index e0b46ad5..cf7e4c70 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/DatasetReader.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/DatasetReader.java
@@ -46,10 +46,10 @@ private DatasetReader() {
 	 * @param type       The data type of this dataset
 	 * @param buffer     The buffer containing the dataset
 	 * @param dimensions The dimensions of this dataset
-	 * @param hdfFc      The file channel for reading the file
+	 * @param hdfBackingStorage The backing storage for reading the file
 	 * @return A Java object representation of this dataset
 	 */
-	public static Object readDataset(DataType type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) {
+	public static Object readDataset(DataType type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) {
 		// If the data is scalar make a fake one element array then remove it at the end
 		final boolean isScalar;
@@ -61,7 +61,7 @@ public static Object readDataset(DataType type, ByteBuffer buffer, int[] dimensi
 			isScalar = false;
 		}

-		final Object data = type.fillData(buffer, dimensions, hdfFc);
+		final Object data = type.fillData(buffer, dimensions, hdfBackingStorage);

 		if (isScalar) {
 			return Array.get(data, 0);
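The scalar branch of `readDataset` works by faking a one-element array and unwrapping it with `java.lang.reflect.Array` at the end. The trick in isolation (the element type and value are illustrative):

```java
import java.lang.reflect.Array;

// The scalar trick from DatasetReader in isolation: read into a fake
// one-element array, then unwrap element 0 for scalar datasets.
public class ScalarUnwrapDemo {
	public static void main(String[] args) {
		int[] dimensions = new int[0]; // a scalar dataset has no dimensions
		boolean isScalar = dimensions.length == 0;
		if (isScalar) {
			dimensions = new int[] { 1 }; // fake one-element array
		}
		Object data = Array.newInstance(double.class, dimensions);
		Array.setDouble(data, 0, 42.0); // stand-in for the real fill
		Object result = isScalar ? Array.get(data, 0) : data;
		System.out.println(result); // 42.0, as a boxed Double
	}
}
```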
b/jhdf/src/main/java/io/jhdf/dataset/NoParent.java
@@ -24,7 +24,7 @@
 /**
 * A placeholder for a {@link Group} to use when the group of a {@link Dataset} is not known because it is specified by
 * a {@link io.jhdf.object.datatype.Reference}. Allows calls like
- * {@code DatasetLoader.createDataset(hdfFc, linkHeader, "Unknown dataset", NoParent.INSTANCE)}.
+ * {@code DatasetLoader.createDataset(hdfBackingStorage, linkHeader, "Unknown dataset", NoParent.INSTANCE)}.
 */
 public enum NoParent implements Group {
diff --git a/jhdf/src/main/java/io/jhdf/dataset/VariableLengthDatasetReader.java b/jhdf/src/main/java/io/jhdf/dataset/VariableLengthDatasetReader.java
index 1a0a3510..cbf0d31b 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/VariableLengthDatasetReader.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/VariableLengthDatasetReader.java
@@ -35,7 +35,7 @@ private VariableLengthDatasetReader() {
 		throw new AssertionError("No instances of VariableLengthDatasetReader");
 	}

-	public static Object readDataset(VariableLength type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) {
+	public static Object readDataset(VariableLength type, ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) {
 		// Make the array to hold the data
 		Class<?> javaType = type.getJavaType();
@@ -55,14 +55,14 @@ public static Object readDataset(VariableLength type, ByteBuffer buffer, int[] d
 		final Map<Long, GlobalHeap> heaps = new HashMap<>();

 		List<ByteBuffer> elements = new ArrayList<>();
-		for (GlobalHeapId globalHeapId : getGlobalHeapIds(buffer, type.getSize(), hdfFc, getTotalPoints(dimensions))) {
+		for (GlobalHeapId globalHeapId : getGlobalHeapIds(buffer, type.getSize(), hdfBackingStorage, getTotalPoints(dimensions))) {
 			if(globalHeapId.getIndex() == 0) {
 				// https://github.com/jamesmudd/jhdf/issues/247
 				// Empty arrays have index=0 and address=0
 				elements.add(EMPTY_BYTE_BUFFER);
 			} else {
 				GlobalHeap heap = heaps.computeIfAbsent(globalHeapId.getHeapAddress(),
-					address -> new GlobalHeap(hdfFc, address));
+					address -> new GlobalHeap(hdfBackingStorage, address));

 				ByteBuffer bb = heap.getObjectData(globalHeapId.getIndex());
 				elements.add(bb);
@@ -73,7 +73,7 @@ public static Object readDataset(VariableLength type, ByteBuffer buffer, int[] d
 		if(type.isVariableLengthString()) {
 			fillStringData(type, data, dimensions, elements.iterator());
 		} else {
-			fillData(type.getParent(), data, dimensions, elements.iterator(), hdfFc);
+			fillData(type.getParent(), data, dimensions, elements.iterator(), hdfBackingStorage);
 		}

 		if (isScalar) {
@@ -83,17 +83,17 @@ public static Object readDataset(VariableLength type, ByteBuffer buffer, int[] d
 		}
 	}

-	private static void fillData(DataType dataType, Object data, int[] dims, Iterator<ByteBuffer> elements, HdfBackingStorage hdfFc) {
+	private static void fillData(DataType dataType, Object data, int[] dims, Iterator<ByteBuffer> elements, HdfBackingStorage hdfBackingStorage) {
 		if (dims.length > 1) {
 			for (int i = 0; i < dims[0]; i++) {
 				Object newArray = Array.get(data, i);
-				fillData(dataType, newArray, stripLeadingIndex(dims), elements, hdfFc);
+				fillData(dataType, newArray, stripLeadingIndex(dims), elements, hdfBackingStorage);
 			}
 		} else {
 			for (int i = 0; i < dims[0]; i++) {
 				ByteBuffer buffer = elements.next();
 				int[] elementDims = new int[]{ buffer.limit() / dataType.getSize()};
-				Object elementData = DatasetReader.readDataset(dataType, buffer, elementDims, hdfFc);
+				Object elementData = DatasetReader.readDataset(dataType, buffer, elementDims, hdfBackingStorage);
 				Array.set(data, i, elementData);
 			}
 		}
@@ -114,13 +114,13 @@ private static void fillStringData(VariableLength dataType, Object data, int[] d
 		}
 	}

-	private static List<GlobalHeapId> getGlobalHeapIds(ByteBuffer bb, int length, HdfBackingStorage hdfFc,
+	private static List<GlobalHeapId> getGlobalHeapIds(ByteBuffer bb, int length, HdfBackingStorage hdfBackingStorage,
 													   int datasetTotalSize) {
 		// For variable length datasets the actual data is in the global heap so need to
 		// resolve that then build the buffer.
 		List<GlobalHeapId> ids = new ArrayList<>(datasetTotalSize);

-		final int skipBytes = length - hdfFc.getSizeOfOffsets() - 4; // id=4
+		final int skipBytes = length - hdfBackingStorage.getSizeOfOffsets() - 4; // id=4

 		// Assume all global heap buffers are little endian
 		bb.order(LITTLE_ENDIAN);
@@ -128,7 +128,7 @@ private static List<GlobalHeapId> getGlobalHeapIds(ByteBuffer bb, int length, Hd
 		while (bb.remaining() >= length) {
 			// Move past the skipped bytes. TODO figure out what this is for
 			bb.position(bb.position() + skipBytes);
-			long heapAddress = Utils.readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets());
+			long heapAddress = Utils.readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets());
 			int index = Utils.readBytesAsUnsignedInt(bb, 4);
 			GlobalHeapId globalHeapId = new GlobalHeapId(heapAddress, index);
 			ids.add(globalHeapId);
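`getGlobalHeapIds` decodes each variable-length element into a (heap address, index) pair: skip the element's own length prefix, read `getSizeOfOffsets()` bytes of address, then a 4-byte index, all little-endian. The same decode as a standalone sketch (the 8-byte offset size and buffer contents are illustrative):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Decode one variable-length element into its global heap pointer, using
// the same field widths as getGlobalHeapIds above. 'sizeOfOffsets' is
// normally read from the superblock; 8 is just a common value.
public class GlobalHeapIdSketch {
	public static void main(String[] args) {
		int sizeOfOffsets = 8;
		// 4-byte length prefix, 8-byte heap address (0x2000), 4-byte index (3)
		ByteBuffer bb = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
		bb.putInt(12).putLong(0x2000L).putInt(3).flip();

		int elementLength = 16;
		int skipBytes = elementLength - sizeOfOffsets - 4; // skip the length prefix
		bb.position(bb.position() + skipBytes);
		long heapAddress = bb.getLong(); // sizeOfOffsets == 8 here
		int index = bb.getInt();
		System.out.println("heap @" + heapAddress + " index " + index);
	}
}
```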
diff --git a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetBase.java b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetBase.java
index 279e7c5f..e6771a1c 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetBase.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetBase.java
@@ -36,8 +36,8 @@ public abstract class ChunkedDatasetBase extends DatasetBase implements ChunkedD

 	protected final FilterPipelineLazyInitializer lazyPipeline;

-	public ChunkedDatasetBase(HdfBackingStorage hdfFc, long address, String name, Group parent, ObjectHeader oh) {
-		super(hdfFc, address, name, parent, oh);
+	public ChunkedDatasetBase(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent, ObjectHeader oh) {
+		super(hdfBackingStorage, address, name, parent, oh);
 		lazyPipeline = new FilterPipelineLazyInitializer();
 	}

@@ -252,7 +252,7 @@ private byte[] decompressChunk(Chunk chunk) {

 	private ByteBuffer getDataBuffer(Chunk chunk) {
 		try {
-			return hdfFc.map(chunk.getAddress(), chunk.getSize());
+			return hdfBackingStorage.map(chunk.getAddress(), chunk.getSize());
 		} catch (Exception e) {
 			throw new HdfException(
 				"Failed to read chunk for dataset '" + getPath() + "' at address " + chunk.getAddress());
diff --git a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV3.java b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV3.java
index b5deafb1..0c1ce1c3 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV3.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV3.java
@@ -43,8 +43,8 @@ public class ChunkedDatasetV3 extends ChunkedDatasetBase {

 	private final ChunkLookupLazyInitializer chunkLookupLazyInitializer;

-	public ChunkedDatasetV3(HdfBackingStorage hdfFc, long address, String name, Group parent, ObjectHeader oh) {
-		super(hdfFc, address, name, parent, oh);
+	public ChunkedDatasetV3(HdfBackingStorage hdfBackingStorage, long address, String name, Group parent, ObjectHeader oh) {
+		super(hdfBackingStorage, address, name, parent, oh);

 		layoutMessage = oh.getMessageOfType(ChunkedDataLayoutMessage.class);

@@ -74,7 +74,7 @@ protected Map<ChunkOffset, Chunk> initialize() {
 				return Collections.emptyMap();
 			}

-			final BTreeV1Data bTree = BTreeV1.createDataBTree(hdfFc, layoutMessage.getBTreeAddress(), getDimensions().length);
+			final BTreeV1Data bTree = BTreeV1.createDataBTree(hdfBackingStorage, layoutMessage.getBTreeAddress(), getDimensions().length);
 			final Collection<Chunk> allChunks = bTree.getChunks();

 			return allChunks.stream().
ExtensibleArrayIndex(HdfBackingStorage hdfBackingStorage, long address, DatasetInfo datasetInfo) { this.headerAddress = address; this.unfilteredChunkSize = datasetInfo.getChunkSizeInBytes(); this.datasetDimensions = datasetInfo.getDatasetDimensions(); this.chunkDimensions = datasetInfo.getChunkDimensions(); - final int headerSize = 16 + hdfFc.getSizeOfOffsets() + 6 * hdfFc.getSizeOfLengths(); - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize); + final int headerSize = 16 + hdfBackingStorage.getSizeOfOffsets() + 6 * hdfBackingStorage.getSizeOfLengths(); + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize); verifySignature(bb, EXTENSIBLE_ARRAY_HEADER_SIGNATURE); @@ -107,19 +107,19 @@ public ExtensibleArrayIndex(HdfBackingStorage hdfFc, long address, DatasetInfo d secondaryBlockPointerCounter = new ExtensibleArraySecondaryBlockPointerCounter(minNumberOfDataBlockPointers); maxNumberOfElementsInDataBlockPageBits = bb.get(); - numberOfSecondaryBlocks = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); - secondaryBlockSize = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); - final int numberOfDataBlocks = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); - dataBlockSize = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); + numberOfSecondaryBlocks = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); + secondaryBlockSize = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); + final int numberOfDataBlocks = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); + dataBlockSize = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); - final int maxIndexSet = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); + final int maxIndexSet = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); chunks = new ArrayList<>(maxIndexSet); - numberOfElements = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); + numberOfElements = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); - final int indexBlockAddress = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); + final int indexBlockAddress = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); - new ExtensibleArrayIndexBlock(hdfFc, indexBlockAddress); + new ExtensibleArrayIndexBlock(hdfBackingStorage, indexBlockAddress); // Checksum bb.rewind(); @@ -128,18 +128,18 @@ public ExtensibleArrayIndex(HdfBackingStorage hdfFc, long address, DatasetInfo d private class ExtensibleArrayIndexBlock { - private ExtensibleArrayIndexBlock(HdfBackingStorage hdfFc, long address) { + private ExtensibleArrayIndexBlock(HdfBackingStorage hdfBackingStorage, long address) { // Figure out the size of the index block - final int headerSize = 6 + hdfFc.getSizeOfOffsets() + final int headerSize = 6 + hdfBackingStorage.getSizeOfOffsets() // TODO need to handle filtered elements - + hdfFc.getSizeOfOffsets() * numberOfElementsInIndexBlock // direct chunk pointers + + hdfBackingStorage.getSizeOfOffsets() * numberOfElementsInIndexBlock // direct chunk pointers + 6 * extensibleArrayElementSize // Always up to 6 data block pointers are in the index block - + numberOfSecondaryBlocks * hdfFc.getSizeOfOffsets() // Secondary block addresses. + + numberOfSecondaryBlocks * hdfBackingStorage.getSizeOfOffsets() // Secondary block addresses. 
+ 4; // checksum - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize); + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize); verifySignature(bb, EXTENSIBLE_ARRAY_INDEX_BLOCK_SIGNATURE); @@ -154,7 +154,7 @@ private ExtensibleArrayIndexBlock(HdfBackingStorage hdfFc, long address) { throw new HdfException("Extensible array client ID mismatch. Possible file corruption detected"); } - final long headerAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long headerAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (headerAddress != ExtensibleArrayIndex.this.headerAddress) { throw new HdfException("Extensible array data block header address mismatch"); } @@ -162,25 +162,25 @@ private ExtensibleArrayIndexBlock(HdfBackingStorage hdfFc, long address) { // Elements in Index block boolean readElement = true; for (int i = 0; readElement && i < numberOfElementsInIndexBlock; i++) { - readElement = readElement(bb, hdfFc); + readElement = readElement(bb, hdfBackingStorage); } // Guard against all the elements having already been read if (readElement && numberOfElements > numberOfElementsInIndexBlock) { // Up to 6 data block pointers directly in the index block for (int i = 0; i < 6; i++) { - final long dataBlockAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long dataBlockAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (dataBlockAddress == UNDEFINED_ADDRESS) { break; // There were fewer than 6 data blocks for the full dataset } - new ExtensibleArrayDataBlock(hdfFc, dataBlockAddress); + new ExtensibleArrayDataBlock(hdfBackingStorage, dataBlockAddress); } } // Now read secondary blocks for (int i = 0; i < numberOfSecondaryBlocks; i++) { - final long secondaryBlockAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); - new ExtensibleArraySecondaryBlock(hdfFc, secondaryBlockAddress); + final long secondaryBlockAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); + new ExtensibleArraySecondaryBlock(hdfBackingStorage, secondaryBlockAddress); } // Checksum @@ -190,14 +190,14 @@ private ExtensibleArrayIndexBlock(HdfBackingStorage hdfFc, long address) { private class ExtensibleArrayDataBlock { - private ExtensibleArrayDataBlock(HdfBackingStorage hdfFc, long address) { + private ExtensibleArrayDataBlock(HdfBackingStorage hdfBackingStorage, long address) { final int numberOfElementsInDataBlock = dataBlockElementCounter.getNextNumberOfChunks(); - final int headerSize = 6 + hdfFc.getSizeOfOffsets() + blockOffsetSize + numberOfElementsInDataBlock * extensibleArrayElementSize // elements (chunks) + 4; // checksum - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize); + final int headerSize = 6 + hdfBackingStorage.getSizeOfOffsets() + blockOffsetSize + numberOfElementsInDataBlock * extensibleArrayElementSize // elements (chunks) + 4; // checksum + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize); verifySignature(bb, EXTENSIBLE_ARRAY_DATA_BLOCK_SIGNATURE); @@ -212,7 +212,7 @@ private ExtensibleArrayDataBlock(HdfBackingStorage hdfFc, long address) { throw new HdfException("Extensible array client ID mismatch.
Possible file corruption detected"); } - final long headerAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long headerAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (headerAddress != ExtensibleArrayIndex.this.headerAddress) { throw new HdfException("Extensible array data block header address mismatch"); } @@ -224,7 +224,7 @@ private ExtensibleArrayDataBlock(HdfBackingStorage hdfFc, long address) { // Data block addresses boolean readElement = true; for (int i = 0; readElement && i < numberOfElementsInDataBlock; i++) { - readElement = readElement(bb, hdfFc); + readElement = readElement(bb, hdfBackingStorage); } // Checksum @@ -236,17 +236,17 @@ private ExtensibleArrayDataBlock(HdfBackingStorage hdfFc, long address) { private class ExtensibleArraySecondaryBlock { - private ExtensibleArraySecondaryBlock(HdfBackingStorage hdfFc, long address) { + private ExtensibleArraySecondaryBlock(HdfBackingStorage hdfBackingStorage, long address) { final int numberOfPointers = secondaryBlockPointerCounter.getNextNumberOfPointers(); - final int secondaryBlockSize = 6 + hdfFc.getSizeOfOffsets() + + final int secondaryBlockSize = 6 + hdfBackingStorage.getSizeOfOffsets() + blockOffsetSize + // Page Bitmap ? numberOfPointers * extensibleArrayElementSize + 4; // checksum - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, secondaryBlockSize); + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, secondaryBlockSize); verifySignature(bb, EXTENSIBLE_ARRAY_SECONDARY_BLOCK_SIGNATURE); @@ -261,7 +261,7 @@ private ExtensibleArraySecondaryBlock(HdfBackingStorage hdfFc, long address) { throw new HdfException("Extensible array client ID mismatch. Possible file corruption detected"); } - final long headerAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long headerAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (headerAddress != ExtensibleArrayIndex.this.headerAddress) { throw new HdfException("Extensible array secondary block header address mismatch"); } @@ -272,11 +272,11 @@ private ExtensibleArraySecondaryBlock(HdfBackingStorage hdfFc, long address) { // Data block addresses for (int i = 0; i < numberOfPointers; i++) { - long dataBlockAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + long dataBlockAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (dataBlockAddress == UNDEFINED_ADDRESS) { break; // This is the last secondary block and not full. } - new ExtensibleArrayDataBlock(hdfFc, dataBlockAddress); + new ExtensibleArrayDataBlock(hdfBackingStorage, dataBlockAddress); } // Checksum @@ -295,15 +295,15 @@ private ExtensibleArraySecondaryBlock(HdfBackingStorage hdfFc, long address) { * Reads an element from the buffer and adds it to the chunks list. 
* * @param bb buffer to read from - * @param hdfFc the HDF file channel + * @param hdfBackingStorage the HDF backing storage * @return true if an element was read, false otherwise */ - private boolean readElement(ByteBuffer bb, HdfBackingStorage hdfFc) { - final long chunkAddress = readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + private boolean readElement(ByteBuffer bb, HdfBackingStorage hdfBackingStorage) { + final long chunkAddress = readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (chunkAddress != UNDEFINED_ADDRESS) { final int[] chunkOffset = Utils.chunkIndexToChunkOffset(elementCounter, chunkDimensions, datasetDimensions); if (filtered) { // Filtered - final int chunkSizeInBytes = Utils.readBytesAsUnsignedInt(bb, extensibleArrayElementSize - hdfFc.getSizeOfOffsets() - 4); + final int chunkSizeInBytes = Utils.readBytesAsUnsignedInt(bb, extensibleArrayElementSize - hdfBackingStorage.getSizeOfOffsets() - 4); final BitSet filterMask = BitSet.valueOf(new byte[] { bb.get(), bb.get(), bb.get(), bb.get() }); chunks.add(new ChunkImpl(chunkAddress, chunkSizeInBytes, chunkOffset, filterMask)); } else { // Not filtered diff --git a/jhdf/src/main/java/io/jhdf/dataset/chunked/indexing/FixedArrayIndex.java b/jhdf/src/main/java/io/jhdf/dataset/chunked/indexing/FixedArrayIndex.java index a0c0e138..bfd4ffab 100644 --- a/jhdf/src/main/java/io/jhdf/dataset/chunked/indexing/FixedArrayIndex.java +++ b/jhdf/src/main/java/io/jhdf/dataset/chunked/indexing/FixedArrayIndex.java @@ -43,14 +43,14 @@ public class FixedArrayIndex implements ChunkIndex { private final List chunks; - public FixedArrayIndex(HdfBackingStorage hdfFc, long address, DatasetInfo datasetInfo) { + public FixedArrayIndex(HdfBackingStorage hdfBackingStorage, long address, DatasetInfo datasetInfo) { this.address = address; this.unfilteredChunkSize = datasetInfo.getChunkSizeInBytes(); this.datasetDimensions = datasetInfo.getDatasetDimensions(); this.chunkDimensions = datasetInfo.getChunkDimensions(); - final int headerSize = 12 + hdfFc.getSizeOfOffsets() + hdfFc.getSizeOfLengths(); - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize); + final int headerSize = 12 + hdfBackingStorage.getSizeOfOffsets() + hdfBackingStorage.getSizeOfLengths(); + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize); byte[] formatSignatureBytes = new byte[4]; bb.get(formatSignatureBytes, 0, formatSignatureBytes.length); @@ -70,8 +70,8 @@ public FixedArrayIndex(HdfBackingStorage hdfFc, long address, DatasetInfo datase entrySize = bb.get(); pageBits = bb.get(); - maxNumberOfEntries = Utils.readBytesAsUnsignedInt(bb, hdfFc.getSizeOfLengths()); - dataBlockAddress = Utils.readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + maxNumberOfEntries = Utils.readBytesAsUnsignedInt(bb, hdfBackingStorage.getSizeOfLengths()); + dataBlockAddress = Utils.readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); chunks = new ArrayList<>(maxNumberOfEntries); @@ -80,16 +80,16 @@ public FixedArrayIndex(HdfBackingStorage hdfFc, long address, DatasetInfo datase ChecksumUtils.validateChecksum(bb); // Building the object fills the chunks.
Probably should be changed - new FixedArrayDataBlock(this, hdfFc, dataBlockAddress); + new FixedArrayDataBlock(this, hdfBackingStorage, dataBlockAddress); } private static class FixedArrayDataBlock { - private FixedArrayDataBlock(FixedArrayIndex fixedArrayIndex, HdfBackingStorage hdfFc, long address) { + private FixedArrayDataBlock(FixedArrayIndex fixedArrayIndex, HdfBackingStorage hdfBackingStorage, long address) { // TODO header size ignoring paging - final int headerSize = 6 + hdfFc.getSizeOfOffsets() + fixedArrayIndex.entrySize * fixedArrayIndex.maxNumberOfEntries + 4; - final ByteBuffer bb = hdfFc.readBufferFromAddress(address, headerSize); + final int headerSize = 6 + hdfBackingStorage.getSizeOfOffsets() + fixedArrayIndex.entrySize * fixedArrayIndex.maxNumberOfEntries + 4; + final ByteBuffer bb = hdfBackingStorage.readBufferFromAddress(address, headerSize); byte[] formatSignatureBytes = new byte[4]; bb.get(formatSignatureBytes, 0, formatSignatureBytes.length); @@ -110,7 +110,7 @@ private FixedArrayDataBlock(FixedArrayIndex fixedArrayIndex, HdfBackingStorage h throw new HdfException("Fixed array client ID mismatch. Possible file corruption detected"); } - final long headerAddress = Utils.readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long headerAddress = Utils.readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); if (headerAddress != fixedArrayIndex.address) { throw new HdfException("Fixed array data block header address mismatch"); } @@ -119,14 +119,14 @@ private FixedArrayDataBlock(FixedArrayIndex fixedArrayIndex, HdfBackingStorage h if (clientId == 0) { // Not filtered for (int i = 0; i < fixedArrayIndex.maxNumberOfEntries; i++) { - final long chunkAddress = Utils.readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); + final long chunkAddress = Utils.readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); final int[] chunkOffset = Utils.chunkIndexToChunkOffset(i, fixedArrayIndex.chunkDimensions, fixedArrayIndex.datasetDimensions); fixedArrayIndex.chunks.add(new ChunkImpl(chunkAddress, fixedArrayIndex.unfilteredChunkSize, chunkOffset)); } } else if (clientId == 1) { // Filtered for (int i = 0; i < fixedArrayIndex.maxNumberOfEntries; i++) { - final long chunkAddress = Utils.readBytesAsUnsignedLong(bb, hdfFc.getSizeOfOffsets()); - final int chunkSizeInBytes = Utils.readBytesAsUnsignedInt(bb, fixedArrayIndex.entrySize - hdfFc.getSizeOfOffsets() - 4); + final long chunkAddress = Utils.readBytesAsUnsignedLong(bb, hdfBackingStorage.getSizeOfOffsets()); + final int chunkSizeInBytes = Utils.readBytesAsUnsignedInt(bb, fixedArrayIndex.entrySize - hdfBackingStorage.getSizeOfOffsets() - 4); final BitSet filterMask = BitSet.valueOf(new byte[] { bb.get(), bb.get(), bb.get(), bb.get() }); final int[] chunkOffset = Utils.chunkIndexToChunkOffset(i, fixedArrayIndex.chunkDimensions, fixedArrayIndex.datasetDimensions); diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/ArrayDataType.java b/jhdf/src/main/java/io/jhdf/object/datatype/ArrayDataType.java index 96b7d2b8..53609975 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/ArrayDataType.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/ArrayDataType.java @@ -66,14 +66,14 @@ public int[] getDimensions() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { if (dimensions.length != 1) { throw new HdfException("Multi dimension array data types are not
supported"); } final Object data = Array.newInstance(getJavaType(), dimensions); for (int i = 0; i < dimensions[0]; i++) { final ByteBuffer elementBuffer = Utils.createSubBuffer(buffer, getBaseType().getSize() * getDimensions()[0]); - final Object elementDataset = DatasetReader.readDataset(getBaseType(), elementBuffer, getDimensions(), hdfFc); + final Object elementDataset = DatasetReader.readDataset(getBaseType(), elementBuffer, getDimensions(), hdfBackingStorage); Array.set(data, i, elementDataset); } return data; diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/BitField.java b/jhdf/src/main/java/io/jhdf/object/datatype/BitField.java index 31bb3e50..5ef3fff2 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/BitField.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/BitField.java @@ -67,7 +67,7 @@ public Class getJavaType() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { final Object data = Array.newInstance(getJavaType(), dimensions); fillBitfieldData(data, dimensions, buffer.order(getByteOrder())); return data; diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/CompoundDataType.java b/jhdf/src/main/java/io/jhdf/object/datatype/CompoundDataType.java index 07b393b7..37dc3ea6 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/CompoundDataType.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/CompoundDataType.java @@ -96,8 +96,8 @@ public List getMembers() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { - return CompoundDatasetReader.readDataset(this, buffer, dimensions, hdfFc); + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { + return CompoundDatasetReader.readDataset(this, buffer, dimensions, hdfBackingStorage); } public static class CompoundDataMember { diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java b/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java index 6a4202c0..6cafe4df 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java @@ -103,6 +103,6 @@ public int getSize() { public abstract Class getJavaType(); - public abstract Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc); + public abstract Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage); } diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/EnumDataType.java b/jhdf/src/main/java/io/jhdf/object/datatype/EnumDataType.java index 18d7fe97..0ecd6ca1 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/EnumDataType.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/EnumDataType.java @@ -81,7 +81,7 @@ public Map getEnumMapping() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { return EnumDatasetReader.readEnumDataset(this, buffer, dimensions); } } diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java b/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java index 2c419647..44c70e0d 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java @@ -103,7 +103,7 @@ public Class getJavaType() { } @Override - public Object 
fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { final Object data = Array.newInstance(getJavaType(), dimensions); final ByteOrder byteOrder = getByteOrder(); if (isSigned()) { diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/FloatingPoint.java b/jhdf/src/main/java/io/jhdf/object/datatype/FloatingPoint.java index 0340601e..7b735b3a 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/FloatingPoint.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/FloatingPoint.java @@ -139,7 +139,7 @@ public Class getJavaType() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { final Object data = Array.newInstance(getJavaType(), dimensions); final ByteOrder byteOrder = getByteOrder(); switch (getSize()) { diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/Reference.java b/jhdf/src/main/java/io/jhdf/object/datatype/Reference.java index 6144640f..93b31712 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/Reference.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/Reference.java @@ -77,7 +77,7 @@ public Class getJavaType() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { final Object data = Array.newInstance(getJavaType(), dimensions); fillData(data, dimensions, buffer.order(ByteOrder.LITTLE_ENDIAN)); return data; diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/StringData.java b/jhdf/src/main/java/io/jhdf/object/datatype/StringData.java index f0e16bc8..63fcff1e 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/StringData.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/StringData.java @@ -34,7 +34,7 @@ public class StringData extends DataType { private final Charset charset; @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { final Object data = Array.newInstance(getJavaType(), dimensions); fillFixedLengthStringData(data, dimensions, buffer, getSize(), getCharset(), getStringPaddingHandler()); return data; diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/VariableLength.java b/jhdf/src/main/java/io/jhdf/object/datatype/VariableLength.java index 300479be..c316b8fa 100644 --- a/jhdf/src/main/java/io/jhdf/object/datatype/VariableLength.java +++ b/jhdf/src/main/java/io/jhdf/object/datatype/VariableLength.java @@ -97,7 +97,7 @@ public Class getJavaType() { } @Override - public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfFc) { - return VariableLengthDatasetReader.readDataset(this, buffer, dimensions, hdfFc); + public Object fillData(ByteBuffer buffer, int[] dimensions, HdfBackingStorage hdfBackingStorage) { + return VariableLengthDatasetReader.readDataset(this, buffer, dimensions, hdfBackingStorage); } } diff --git a/jhdf/src/test/java/io/jhdf/FractalHeapTest.java b/jhdf/src/test/java/io/jhdf/FractalHeapTest.java index 8afe4b8d..7f4fc5e6 100644 --- a/jhdf/src/test/java/io/jhdf/FractalHeapTest.java +++ b/jhdf/src/test/java/io/jhdf/FractalHeapTest.java @@ -37,9 +37,9 @@ void setup() throws IOException { try (RandomAccessFile raf = new RandomAccessFile(new 
File(testFile), "r")) { FileChannel fc = raf.getChannel(); Superblock sb = Superblock.readSuperblock(fc, 0); - HdfBackingStorage hdfFc = new HdfFileChannel(fc, sb); + HdfBackingStorage hdfBackingStorage = new HdfFileChannel(fc, sb); - fractalHeap = new FractalHeap(hdfFc, 1870); + fractalHeap = new FractalHeap(hdfBackingStorage, 1870); } } diff --git a/jhdf/src/test/java/io/jhdf/GlobalHeapTest.java b/jhdf/src/test/java/io/jhdf/GlobalHeapTest.java index 552f4d6f..6fa796c2 100644 --- a/jhdf/src/test/java/io/jhdf/GlobalHeapTest.java +++ b/jhdf/src/test/java/io/jhdf/GlobalHeapTest.java @@ -37,7 +37,7 @@ class GlobalHeapTest { private GlobalHeap globalHeap; private Superblock sb; - private HdfBackingStorage hdfFc; + private HdfFileChannel hdfFc; @BeforeEach void setup() throws IOException, URISyntaxException { diff --git a/jhdf/src/test/java/io/jhdf/GroupSymbolTableNodeTest.java b/jhdf/src/test/java/io/jhdf/GroupSymbolTableNodeTest.java index b0d9de29..66b05c5a 100644 --- a/jhdf/src/test/java/io/jhdf/GroupSymbolTableNodeTest.java +++ b/jhdf/src/test/java/io/jhdf/GroupSymbolTableNodeTest.java @@ -27,24 +27,24 @@ import static org.hamcrest.Matchers.is; class GroupSymbolTableNodeTest { - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; @BeforeEach void setUp() throws IOException, URISyntaxException { final URI testFileUri = this.getClass().getResource("/hdf5/test_file.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testGroupSymbolTableNode() { - GroupSymbolTableNode node = new GroupSymbolTableNode(hdfFc, 1504); + GroupSymbolTableNode node = new GroupSymbolTableNode(hdfBackingStorage, 1504); assertThat(node.getVersion(), is(equalTo((short) 1))); assertThat(node.getNumberOfEntries(), is(equalTo((short) 3))); diff --git a/jhdf/src/test/java/io/jhdf/GroupTest.java b/jhdf/src/test/java/io/jhdf/GroupTest.java index 440275c4..36906bab 100644 --- a/jhdf/src/test/java/io/jhdf/GroupTest.java +++ b/jhdf/src/test/java/io/jhdf/GroupTest.java @@ -37,7 +37,7 @@ class GroupTest { private static final String DATASETS_GROUP = "datasets_group"; - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; // Mock private Group rootGroup; @@ -49,7 +49,7 @@ void setUp() throws IOException { FileChannel fc = FileChannel.open(file.toPath(), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); rootGroup = mock(Group.class); when(rootGroup.getPath()).thenReturn("/"); @@ -58,12 +58,12 @@ void setUp() throws IOException { @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testGroup() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); assertThat(group.getPath(), is(equalTo("/datasets_group/"))); assertThat(group.toString(), is(equalTo("Group [name=datasets_group, path=/datasets_group/, address=0x320]"))); assertThat(group.isGroup(), is(true)); @@ -76,27 +76,27 @@ void testGroup() { @Test void testGettingChildrenByName() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = 
GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); Node child = group.getChild("int"); assertThat(child, is(notNullValue())); } @Test void testGettingMissingChildReturnsNull() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); Node child = group.getChild("made_up_missing_child_name"); assertThat(child, is(nullValue())); } @Test void testGetByPathWithInvalidPathReturnsNull() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); assertThrows(HdfInvalidPathException.class, () -> group.getByPath("float/missing_node")); } @Test void testGetByPathWithValidPathReturnsNode() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); String path = "float/float32"; Node child = group.getByPath(path); assertThat(child.getPath(), is(equalTo(group.getPath() + path))); @@ -104,7 +104,7 @@ void testGetByPathWithValidPathReturnsNode() { @Test void testGetByPathThroughDatasetThrows() { - Group group = GroupImpl.createGroup(hdfFc, 800, DATASETS_GROUP, rootGroup); + Group group = GroupImpl.createGroup(hdfBackingStorage, 800, DATASETS_GROUP, rootGroup); // Try to keep resolving a path through a dataset 'float32' this should return // null String path = "float/float32/missing_node"; diff --git a/jhdf/src/test/java/io/jhdf/HdfFileChannelTest.java b/jhdf/src/test/java/io/jhdf/HdfFileChannelTest.java index fd32d4ea..1d25fc56 100644 --- a/jhdf/src/test/java/io/jhdf/HdfFileChannelTest.java +++ b/jhdf/src/test/java/io/jhdf/HdfFileChannelTest.java @@ -10,7 +10,6 @@ package io.jhdf; import io.jhdf.exceptions.HdfException; -import io.jhdf.storage.HdfBackingStorage; import io.jhdf.storage.HdfFileChannel; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -45,7 +44,7 @@ class HdfFileChannelTest { Superblock sb; // Under test - private HdfBackingStorage hdfFc; + private HdfFileChannel hdfFc; @BeforeEach void before() { diff --git a/jhdf/src/test/java/io/jhdf/LocalHeapTest.java b/jhdf/src/test/java/io/jhdf/LocalHeapTest.java index d939f5ac..b9daca29 100644 --- a/jhdf/src/test/java/io/jhdf/LocalHeapTest.java +++ b/jhdf/src/test/java/io/jhdf/LocalHeapTest.java @@ -28,24 +28,24 @@ import static org.hamcrest.Matchers.is; class LocalHeapTest { - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; @BeforeEach void setUp() throws IOException, URISyntaxException { final URI testFileUri = this.getClass().getResource("/hdf5/test_file.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testLocalHeap() { - LocalHeap heap = new LocalHeap(hdfFc, 680); + LocalHeap heap = new LocalHeap(hdfBackingStorage, 680); assertThat(heap.getVersion(), is(equalTo((short) 0))); assertThat(heap.getDataSegmentSize(), is(equalTo(88L))); @@ -57,7 +57,7 @@ void testLocalHeap() { @Test void testAccessingData() { - LocalHeap heap = new LocalHeap(hdfFc, 680); + LocalHeap heap = new LocalHeap(hdfBackingStorage, 680); ByteBuffer bb = heap.getDataBuffer(); 
assertThat(bb.capacity(), is(equalTo(88))); // Test reading a name from the heap diff --git a/jhdf/src/test/java/io/jhdf/ObjectHeaderTest.java b/jhdf/src/test/java/io/jhdf/ObjectHeaderTest.java index d5bea714..abdcad5d 100644 --- a/jhdf/src/test/java/io/jhdf/ObjectHeaderTest.java +++ b/jhdf/src/test/java/io/jhdf/ObjectHeaderTest.java @@ -35,7 +35,7 @@ import static org.mockito.Mockito.verify; class ObjectHeaderTest { - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; private Superblock sb; private FileChannel fc; @@ -44,18 +44,18 @@ void setUp() throws IOException, URISyntaxException { final URI testFileUri = this.getClass().getResource("/hdf5/test_file.hdf5").toURI(); fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() throws IOException { - hdfFc.close(); + hdfBackingStorage.close(); fc.close(); } @Test void testObjectHeaderOnGroup() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 800); // dataset_group header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 800); // dataset_group header assertThat(oh.getVersion(), is(equalTo(1))); assertThat(oh.getAddress(), is(equalTo(800L))); @@ -70,7 +70,7 @@ void testObjectHeaderOnGroup() { @Test void testObjectHeaderOnFloat32Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 7272); // float32 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 7272); // float32 header assertThat(oh.getVersion(), is(equalTo(1))); assertThat(oh.getAddress(), is(equalTo(7272L))); @@ -85,7 +85,7 @@ void testObjectHeaderOnFloat32Dataset() { @Test void testObjectHeaderOnFloat64Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 7872); // float64 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 7872); // float64 header assertThat(oh.getVersion(), is(equalTo(1))); assertThat(oh.getAddress(), is(equalTo(7872L))); @@ -100,7 +100,7 @@ void testObjectHeaderOnFloat64Dataset() { @Test void testObjectHeaderOnInt8Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 10904); // int8 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 10904); // int8 header assertThat(oh.getVersion(), is(equalTo(1))); assertThat(oh.getAddress(), is(equalTo(10904L))); diff --git a/jhdf/src/test/java/io/jhdf/ObjectHeaderV2Test.java b/jhdf/src/test/java/io/jhdf/ObjectHeaderV2Test.java index 21579391..df6ff907 100644 --- a/jhdf/src/test/java/io/jhdf/ObjectHeaderV2Test.java +++ b/jhdf/src/test/java/io/jhdf/ObjectHeaderV2Test.java @@ -32,24 +32,24 @@ class ObjectHeaderV2Test { /** This will need to be updated each time the test files are regenerated */ private static final long TIMESTAMP = 1553279213L; - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; @BeforeEach void setUp() throws IOException, URISyntaxException { final URI testFileUri = this.getClass().getResource("/hdf5/test_file2.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testRootGroupObjectHeaderV2() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 48); // Root group header + 
ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 48); // Root group header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(48L))); @@ -69,7 +69,7 @@ void testRootGroupObjectHeaderV2() { @Test void testDatasetsGroupObjectHeaderV2() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 195); // Root group header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 195); // datasets group header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(195L))); @@ -89,7 +89,7 @@ void testDatasetsGroupObjectHeaderV2() { @Test void testObjectHeaderOnFloat16Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 608); // float16 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 608); // float16 header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(608L))); @@ -109,7 +109,7 @@ void testObjectHeaderOnFloat16Dataset() { @Test void testObjectHeaderOnFloat32Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 892); // float32 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 892); // float32 header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(892L))); @@ -129,7 +129,7 @@ void testObjectHeaderOnFloat32Dataset() { @Test void testObjectHeaderOnFloat64Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 1176); // float64 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 1176); // float64 header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(1176L))); @@ -149,7 +149,7 @@ void testObjectHeaderOnFloat64Dataset() { @Test void testObjectHeaderOnInt8Dataset() { - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 1655); // int8 header + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 1655); // int8 header assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(1655L))); @@ -173,9 +173,9 @@ void testCreationOrderTracked() throws IOException, URISyntaxException { final URI testFileUri = this.getClass().getResource("/hdf5/test_attribute_with_creation_order.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - HdfBackingStorage hdfFc = new HdfFileChannel(fc, sb); + HdfBackingStorage hdfBackingStorage = new HdfFileChannel(fc, sb); - ObjectHeader oh = ObjectHeader.readObjectHeader(hdfFc, 48); + ObjectHeader oh = ObjectHeader.readObjectHeader(hdfBackingStorage, 48); assertThat(oh.getVersion(), is(equalTo(2))); assertThat(oh.getAddress(), is(equalTo(48L))); @@ -183,7 +183,7 @@ void testCreationOrderTracked() throws IOException, URISyntaxException { assertThat(oh.isAttributeCreationOrderIndexed(), is(true)); assertThat(oh.isAttributeCreationOrderTracked(), is(true)); - hdfFc.close(); + hdfBackingStorage.close(); } } diff --git a/jhdf/src/test/java/io/jhdf/SymbolTableEntryTest.java b/jhdf/src/test/java/io/jhdf/SymbolTableEntryTest.java index d92e4d5f..26fde32d 100644 --- a/jhdf/src/test/java/io/jhdf/SymbolTableEntryTest.java +++ b/jhdf/src/test/java/io/jhdf/SymbolTableEntryTest.java @@ -27,24 +27,24 @@ import static org.hamcrest.Matchers.is; class SymbolTableEntryTest { - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; @BeforeEach void setUp() throws URISyntaxException, IOException { final URI testFileUri =
this.getClass().getResource("/hdf5/test_file.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testSymbolTableEntry() { - SymbolTableEntry ste = new SymbolTableEntry(hdfFc, 56); + SymbolTableEntry ste = new SymbolTableEntry(hdfBackingStorage, 56); assertThat(ste.getLinkNameOffset(), is(equalTo(0))); assertThat(ste.getObjectHeaderAddress(), is(equalTo(96L))); assertThat(ste.getCacheType(), is(equalTo(1))); diff --git a/jhdf/src/test/java/io/jhdf/btree/BTreeV1Test.java b/jhdf/src/test/java/io/jhdf/btree/BTreeV1Test.java index 3ccc2422..509b962a 100644 --- a/jhdf/src/test/java/io/jhdf/btree/BTreeV1Test.java +++ b/jhdf/src/test/java/io/jhdf/btree/BTreeV1Test.java @@ -33,24 +33,24 @@ import static org.junit.jupiter.api.Assertions.assertThrows; class BTreeV1Test { - private HdfBackingStorage hdfFc; + private HdfBackingStorage hdfBackingStorage; @BeforeEach void setUp() throws URISyntaxException, IOException { final URI testFileUri = this.getClass().getResource("/hdf5/test_chunked_datasets_earliest.hdf5").toURI(); FileChannel fc = FileChannel.open(Paths.get(testFileUri), StandardOpenOption.READ); Superblock sb = Superblock.readSuperblock(fc, 0); - hdfFc = new HdfFileChannel(fc, sb); + hdfBackingStorage = new HdfFileChannel(fc, sb); } @AfterEach void after() { - hdfFc.close(); + hdfBackingStorage.close(); } @Test void testGroupBTreeNode() { - BTreeV1Group bTree = BTreeV1.createGroupBTree(hdfFc, 136); + BTreeV1Group bTree = BTreeV1.createGroupBTree(hdfBackingStorage, 136); assertThat(bTree.getAddress(), is(equalTo(136L))); assertThat(bTree.getEntriesUsed(), is(equalTo(1))); @@ -62,7 +62,7 @@ void testGroupBTreeNode() { @Test void testDataBTreeNode() { - BTreeV1Data bTree = BTreeV1.createDataBTree(hdfFc, 2104, 3); + BTreeV1Data bTree = BTreeV1.createDataBTree(hdfBackingStorage, 2104, 3); assertThat(bTree.getAddress(), is(equalTo(2104L))); assertThat(bTree.getEntriesUsed(), is(equalTo(20))); @@ -74,11 +74,11 @@ void testDataBTreeNode() { @Test void testCreatingBTreeOfDataTypeWithGroupThrows() { - assertThrows(HdfException.class, () -> BTreeV1.createDataBTree(hdfFc, 136, 1245)); + assertThrows(HdfException.class, () -> BTreeV1.createDataBTree(hdfBackingStorage, 136, 1245)); } @Test void testCreatingBTreeOfGroupTypeWithDataThrows() { - assertThrows(HdfException.class, () -> BTreeV1.createGroupBTree(hdfFc, 2104)); + assertThrows(HdfException.class, () -> BTreeV1.createGroupBTree(hdfBackingStorage, 2104)); } } diff --git a/jhdf/src/test/java/io/jhdf/dataset/DatasetByAddressTest.java b/jhdf/src/test/java/io/jhdf/dataset/DatasetByAddressTest.java index 1112432f..ac869464 100644 --- a/jhdf/src/test/java/io/jhdf/dataset/DatasetByAddressTest.java +++ b/jhdf/src/test/java/io/jhdf/dataset/DatasetByAddressTest.java @@ -67,9 +67,9 @@ Collection scalarDatasetTests() { private Executable createTest(HdfFile file, long address) { return () -> { - HdfBackingStorage hdfFc = file.getHdfBackingStorage(); - ObjectHeader header = ObjectHeader.readObjectHeader(hdfFc, address); - Dataset dataset = DatasetLoader.createDataset(hdfFc, header, "unknown dataset", NoParent.INSTANCE); + HdfBackingStorage hdfBackingStorage = file.getHdfBackingStorage(); + ObjectHeader header = ObjectHeader.readObjectHeader(hdfBackingStorage, address); + Dataset dataset = 
DatasetLoader.createDataset(hdfBackingStorage, header, "unknown dataset", NoParent.INSTANCE); Object data = dataset.getData(); assertThat(getDimensions(data), is(equalTo(new int[]{10}))); Object[] flatData = flatten(data); diff --git a/jhdf/src/test/java/io/jhdf/object/datatype/FixedPointTest.java b/jhdf/src/test/java/io/jhdf/object/datatype/FixedPointTest.java index 6000b6dd..bb41ce2f 100644 --- a/jhdf/src/test/java/io/jhdf/object/datatype/FixedPointTest.java +++ b/jhdf/src/test/java/io/jhdf/object/datatype/FixedPointTest.java @@ -130,27 +130,27 @@ Stream datasetReadTests() { private Executable createTest(ByteBuffer buffer, FixedPoint dataType, int[] dims, Object expected) { return () -> { buffer.rewind(); // For shared buffers - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - Object actual = dataType.fillData(buffer, dims, hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfFileChannel.class); + Object actual = dataType.fillData(buffer, dims, hdfBackingStorage); assertThat(actual, is(expected)); - verifyNoInteractions(hdfFc); + verifyNoInteractions(hdfBackingStorage); }; } @Test void testUnsupportedFixedPointLengthThrows() { FixedPoint invalidDataType = mockFixedPoint(true, 11); // 11 byte data is not supported - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(longBuffer, dims, hdfFc)); - verifyNoInteractions(hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfBackingStorage.class); + assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(longBuffer, dims, hdfBackingStorage)); + verifyNoInteractions(hdfBackingStorage); } @Test void testUnsupportedUnsignedFixedPointLengthThrows() { FixedPoint invalidDataType = mockFixedPoint(false, 11); // 11 byte data is not supported - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(longBuffer, dims, hdfFc)); - verifyNoInteractions(hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfBackingStorage.class); + assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(longBuffer, dims, hdfBackingStorage)); + verifyNoInteractions(hdfBackingStorage); } } diff --git a/jhdf/src/test/java/io/jhdf/object/datatype/FloatingPointTest.java b/jhdf/src/test/java/io/jhdf/object/datatype/FloatingPointTest.java index e3dd5bef..6f69b988 100644 --- a/jhdf/src/test/java/io/jhdf/object/datatype/FloatingPointTest.java +++ b/jhdf/src/test/java/io/jhdf/object/datatype/FloatingPointTest.java @@ -85,19 +85,19 @@ Stream datasetReadTests() { private Executable createTest(ByteBuffer buffer, FloatingPoint dataType, int[] dims, Object expected) { return () -> { buffer.rewind(); // For shared buffers - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - Object actual = dataType.fillData(buffer, dims, hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfFileChannel.class); + Object actual = dataType.fillData(buffer, dims, hdfBackingStorage); assertThat(actual, is(expected)); - verifyNoInteractions(hdfFc); + verifyNoInteractions(hdfBackingStorage); }; } @Test void testUnsupportedFloatingPointLengthThrows() { FloatingPoint invalidDataType = mockFloatingPoint(11); // 11 byte data is not supported - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(floatBuffer, dims, hdfFc)); - verifyNoInteractions(hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfBackingStorage.class); + assertThrows(HdfTypeException.class, () -> 
invalidDataType.fillData(floatBuffer, dims, hdfBackingStorage)); + verifyNoInteractions(hdfBackingStorage); } /** diff --git a/jhdf/src/test/java/io/jhdf/object/datatype/ReferenceTest.java b/jhdf/src/test/java/io/jhdf/object/datatype/ReferenceTest.java index ea798156..eac773b8 100644 --- a/jhdf/src/test/java/io/jhdf/object/datatype/ReferenceTest.java +++ b/jhdf/src/test/java/io/jhdf/object/datatype/ReferenceTest.java @@ -58,10 +58,10 @@ Collection datasetReadTests() { private Executable createTest(ByteBuffer buffer, Reference dataType, int[] dims, Object expected) { return () -> { buffer.rewind(); // For shared buffers - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - Object actual = dataType.fillData(buffer, dims, hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfFileChannel.class); + Object actual = dataType.fillData(buffer, dims, hdfBackingStorage); assertThat(actual, is(expected)); - verifyNoInteractions(hdfFc); + verifyNoInteractions(hdfBackingStorage); }; } @@ -69,9 +69,9 @@ private Executable createTest(ByteBuffer buffer, Reference dataType, int[] dims, @Test void testUnsupportedReferenceLengthThrows() { Reference invalidDataType = mockReference(11); // 11 byte data is not supported - HdfBackingStorage hdfFc = mock(HdfFileChannel.class); - assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(referenceIntBuffer, dims, hdfFc)); - verifyNoInteractions(hdfFc); + HdfBackingStorage hdfBackingStorage = mock(HdfFileChannel.class); + assertThrows(HdfTypeException.class, () -> invalidDataType.fillData(referenceIntBuffer, dims, hdfBackingStorage)); + verifyNoInteractions(hdfBackingStorage); } }
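
For reference, every test file touched above obtains its backing storage through the same three steps. A minimal sketch of that pattern, assuming only what the diff itself shows (the file path is a placeholder):

import io.jhdf.Superblock;
import io.jhdf.storage.HdfBackingStorage;
import io.jhdf.storage.HdfFileChannel;

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

class BackingStorageSketch {
    static HdfBackingStorage open(String path) throws IOException {
        // Open the HDF5 file read-only, as the test setUp() methods do
        FileChannel fc = FileChannel.open(Paths.get(path), StandardOpenOption.READ);
        // The superblock at offset 0 records sizeOfOffsets/sizeOfLengths for the whole file
        Superblock sb = Superblock.readSuperblock(fc, 0);
        // HdfFileChannel is the FileChannel-backed implementation of HdfBackingStorage
        return new HdfFileChannel(fc, sb);
    }
}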
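The getGlobalHeapIds hunk at the top of this section only renames a parameter, but the buffer layout it walks is easy to misread. A standalone sketch of that walk, with hypothetical names (readUnsignedLe stands in for Utils.readBytesAsUnsignedLong; the little-endian assumption is the one the original comment states):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class GlobalHeapIdSketch {
    // Each element occupies 'length' bytes: a skipped prefix, then an
    // offset-sized global heap address, then a 4-byte index into that heap.
    static void walk(ByteBuffer bb, int length, int sizeOfOffsets) {
        bb.order(ByteOrder.LITTLE_ENDIAN);
        final int skipBytes = length - sizeOfOffsets - 4; // the index is always 4 bytes
        while (bb.remaining() >= length) {
            bb.position(bb.position() + skipBytes); // prefix is skipped, as in the TODO above
            long heapAddress = readUnsignedLe(bb, sizeOfOffsets);
            long index = Integer.toUnsignedLong(bb.getInt());
            System.out.println("heap address " + heapAddress + ", index " + index);
        }
    }

    // Little-endian unsigned read, least significant byte first
    static long readUnsignedLe(ByteBuffer bb, int numBytes) {
        long value = 0;
        for (int i = 0; i < numBytes; i++) {
            value |= (bb.get() & 0xFFL) << (8 * i);
        }
        return value;
    }
}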
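Both FixedArrayIndex and ExtensibleArrayIndex above derive each chunk's offset within the dataset purely from its position in the array, via Utils.chunkIndexToChunkOffset. A hypothetical re-derivation of that mapping, assuming the usual HDF5 row-major chunk ordering (the real Utils method may differ in detail):

class ChunkOffsetSketch {
    static int[] chunkIndexToChunkOffset(int chunkIndex, int[] chunkDims, int[] datasetDims) {
        final int[] offset = new int[chunkDims.length];
        // Chunks along each dimension, rounding up so partial edge chunks count
        final int[] chunksPerDim = new int[chunkDims.length];
        for (int i = 0; i < chunkDims.length; i++) {
            chunksPerDim[i] = (datasetDims[i] + chunkDims[i] - 1) / chunkDims[i];
        }
        // Decode the linear index row-major, last dimension varying fastest
        int remaining = chunkIndex;
        for (int i = chunkDims.length - 1; i >= 0; i--) {
            offset[i] = (remaining % chunksPerDim[i]) * chunkDims[i];
            remaining /= chunksPerDim[i];
        }
        return offset;
    }
}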
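Finally, the DatasetByAddressTest hunk shows the renamed accessor end to end: backing storage from the file, object header by address, dataset from the header. The same flow outside the test harness might look like the sketch below; the file name and address are placeholders, and the package locations of DatasetLoader and NoParent are assumptions, since the test's imports are not part of this diff.

import io.jhdf.HdfFile;
import io.jhdf.ObjectHeader;
import io.jhdf.api.Dataset;
import io.jhdf.dataset.DatasetLoader; // assumed location
import io.jhdf.dataset.NoParent;      // assumed location
import io.jhdf.storage.HdfBackingStorage;

import java.nio.file.Paths;

class DatasetByAddressSketch {
    static Object read(String path, long address) {
        try (HdfFile file = new HdfFile(Paths.get(path))) {
            HdfBackingStorage hdfBackingStorage = file.getHdfBackingStorage();
            // Resolve the object header directly by its file address
            ObjectHeader header = ObjectHeader.readObjectHeader(hdfBackingStorage, address);
            // Build a dataset with no parent group, as the test does
            Dataset dataset = DatasetLoader.createDataset(hdfBackingStorage, header, "unknown dataset", NoParent.INSTANCE);
            return dataset.getData();
        }
    }
}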