722
722
def readv(self, relpath, offsets):
    """Read several byte ranges from relpath in one smart-server request.

    :param relpath: transport-relative path to read from.
    :param offsets: iterable of (start, length) pairs.
    :return: a generator yielding (start, bytes) pairs in the same order
        as *offsets* (the requested order, not the sorted read order).
    :raises errors.ShortReadvError: if the server returns fewer bytes
        than a coalesced range requires.

    NOTE(review): the ``def`` line and two interior lines were lost in the
    mangled hunk this was recovered from; the signature, the ``'readv'``
    request name (grounded by the ``resp[0] != 'readv'`` check below) and
    the ``data_map = {}`` initialization are reconstructed — confirm
    against the original file.
    """
    offsets = list(offsets)

    sorted_offsets = sorted(offsets)
    # Turn the list of offsets into a stack: we pop requests off it in
    # the caller's order as soon as their data becomes available, so
    # results stream back without waiting for the whole response.
    offset_stack = iter(offsets)
    cur_offset_and_size = offset_stack.next()
    # Coalesce adjacent/nearby ranges into fewer, larger server reads.
    coalesced = list(self._coalesce_offsets(sorted_offsets,
                         limit=self._max_readv_combine,
                         fudge_factor=self._bytes_to_read_before_seek))

    # Send the coalesced ranges, not the raw offsets: the server performs
    # one read per coalesced range.
    resp = self._client._call_with_upload(
        'readv',
        (self._remote_path(relpath),),
        self._serialise_offsets((c.start, c.length) for c in coalesced))

    if resp[0] != 'readv':
        # This should raise an exception
        self._translate_error(resp)

    data = self._client._recv_bulk()
    # Cache the results, but only until they have been fulfilled.
    data_map = {}
    for c_offset in coalesced:
        if len(data) < c_offset.length:
            raise errors.ShortReadvError(relpath, c_offset.start,
                        c_offset.length, actual=len(data))
        # Slice this coalesced read back into the original sub-ranges,
        # keyed by their absolute (start, size) so requests can be
        # matched regardless of read order.
        for suboffset, subsize in c_offset.ranges:
            key = (c_offset.start + suboffset, subsize)
            data_map[key] = data[suboffset:suboffset + subsize]
        # Consume this range's bytes from the front of the bulk payload.
        data = data[c_offset.length:]

        # Now that we've read some data, see if we can yield anything back
        while cur_offset_and_size in data_map:
            this_data = data_map.pop(cur_offset_and_size)
            yield cur_offset_and_size[0], this_data
            # StopIteration from an exhausted stack simply ends the
            # generator once every requested offset has been yielded.
            cur_offset_and_size = offset_stack.next()
742
761
def rename(self, rel_from, rel_to):
743
762
self._call('rename',