# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import math
import md5
import time

from bzrlib import (
    debug,
    graph,
    pack,
    ui,
    )
from bzrlib.index import (
    GraphIndex,
    GraphIndexBuilder,
    InMemoryGraphIndex,
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
from bzrlib.knit import (
    KnitPlainFactory,
    KnitVersionedFiles,
    _KnitGraphIndex,
    _DirectPackAccess,
    )
from bzrlib.osutils import rand_chars, split_lines
from bzrlib.pack import ContainerWriter
from bzrlib.store import revision
from bzrlib import tsort
""")
from bzrlib import (
    bzrdir,
    errors,
    lockable_files,
    lockdir,
    osutils,
    symbol_versioning,
    transactions,
    xml5,
    xml6,
    xml7,
    )

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepository,
    MetaDirRepositoryFormat,
    RepositoryFormat,
    RootCommitBuilder,
    )
import bzrlib.revision as _mod_revision
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.trace import (
    mutter,
    mutter_callsite,
    note,
    warning,
    )


class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


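# Illustration of the _heads() helper above, with hypothetical ids (not taken
# from this module): for file_id 'f-id' and revision_ids ['rev-a', 'rev-b'] it
# builds the text-index keys [('f-id', 'rev-a'), ('f-id', 'rev-b')], asks
# graph.Graph.heads() which of those keys are heads of the per-file graph, and
# returns just the revision ids, e.g. set(['rev-b']) when rev-b's text
# descends from rev-a's.
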
class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _external_compression_parents_of_texts(self):
        keys = set()
        refs = set()
        for node in self.text_index.iter_all_entries():
            keys.add(node[1])
            refs.update(node[3][1])
        return refs - keys


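# Note on Pack._external_compression_parents_of_texts() above: each entry
# yielded by GraphIndex.iter_all_entries() is a tuple of the form
# (index, key, value, reference_lists); node[1] is the (file_id, revision_id)
# key and node[3][1] is the reference list carrying compression parents, so
# the method returns the compression parents referenced by this pack's texts
# that are not themselves stored in the pack.
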
class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s>" % (
            id(self), self.pack_transport, self.name)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

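    # Illustration (hypothetical pack name): given the table above,
    # index_name('revision', 'a1b2c3') returns 'a1b2c3.rix', and
    # index_offset('revision') returns 0, the slot of the revision index
    # size within the index_sizes four-tuple.
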
    def __init__(self, upload_transport, index_transport, pack_transport,
        upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param upload_transport: A writable transport for the pack to be
            incrementally uploaded to.
        :param index_transport: A writable transport for the pack's indices to
            be written to when the pack is finished.
        :param pack_transport: A writable transport for the pack to be renamed
            to when the upload is complete. This *must* be the same as
            upload_transport.clone('../packs').
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            InMemoryGraphIndex(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            InMemoryGraphIndex(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            InMemoryGraphIndex(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # listing.
            InMemoryGraphIndex(reference_lists=0),
            )
        # where should the new pack be opened
        self.upload_transport = upload_transport
        # where are indices written out to
        self.index_transport = index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = md5.new()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'

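    # Write path in brief: callers append bytes through self._write_data,
    # which buffers them in self._buffer until the running total exceeds
    # self._cache_limit (or a flush is forced), then writes the joined bytes
    # to self.write_stream and feeds the same bytes to the md5 in self._hash;
    # finish() later uses that digest as the pack's name.
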
    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count())

    def finish(self):
        """Finish the new pack.

        This:
         - finalises the content
         - assigns a name (the md5 of the content, currently)
         - writes out the associated indices
         - renames the pack into place.
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision')
        self._write_index('inventory', self.inventory_index, 'inventory')
        self._write_index('text', self.text_index, 'file texts')
        self._write_index('signature', self.signature_index,
            'revision signatures')
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        self.upload_transport.rename(self.random_name,
                '../packs/' + self.name + '.pack')
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                self.pack_transport, self.name,
                time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_sizes array for a given index type."""
        return NewPack.index_definitions[index_type][1]

    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            GraphIndex(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        self.index_sizes[self.index_offset(index_type)] = \
            self.index_transport.put_file(index_name, index.finish(),
            mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self):
        """Create an AggregateIndex."""
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([])
        self.data_access = _DirectPackAccess(self.index_to_pack)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # there is one.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        self.add_callback = None

    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
        self.data_access.set_writer(None, None, (None, None))


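# Sketch of a typical AggregateIndex lifecycle (hypothetical 'new_pack'
# variable; the real wiring lives in the RepositoryPackCollection code):
#
#     agg = AggregateIndex()
#     agg.add_writable_index(new_pack.revision_index, new_pack)
#     # agg.add_callback now queues nodes into the writable index, and
#     # agg.data_access writes the matching records via new_pack._writer.
#     agg.clear()    # drop the writer and all index mappings again
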
class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        # The index layer keys for the revisions being copied. None for 'all
        # objects'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None
        self._extra_init()

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to create_pack_from_packs while '
                'another pack is being written.')
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

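    # Hypothetical call, with invented names, of the flow described in the
    # docstring above: a collection-level caller would run something like
    #     Packer(pack_collection, packs_to_combine, '.autopack').pack()
    # which opens a NewPack via open_pack(), bulk-copies revision, inventory,
    # text and signature data from the source packs, and returns the
    # resulting Pack (or None for an empty request).
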
    def open_pack(self):
        """Open a pack for the pack we are creating."""
        return NewPack(self._pack_collection._upload_transport,
            self._pack_collection._index_transport,
            self._pack_collection._pack_transport, upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        # select revisions
        if self.revision_ids:
            revision_keys = [(revision_id,) for revision_id in self.revision_ids]
        else:
            revision_keys = None
        # select revision keys
        revision_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'revision_index')[0]
        revision_nodes = self._pack_collection._index_contents(revision_index_map, revision_keys)
        # copy revision keys and adjust values
        self.pb.update("Copying revision texts", 1)
        total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
        list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
            self.new_pack.revision_index, readv_group_iter, total_items))
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.revision_index.key_count(),
                time.time() - self.new_pack.start_time)
        self._revision_keys = revision_keys

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'inventory_index')[0]
        inv_nodes = self._pack_collection._index_contents(inventory_index_map, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

def _copy_text_texts(self): |
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
668 |
# select text keys
|
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
669 |
text_index_map, text_nodes = self._get_text_nodes() |
|
2951.2.2
by Robert Collins
Factor out inventory text copying in Packer to a single helper method. |
670 |
if self._text_filter is not None: |
|
2592.3.149
by Robert Collins
Unbreak pack to pack fetching properly, with missing-text detection really working. |
671 |
# We could return the keys copied as part of the return value from
|
672 |
# _copy_nodes_graph but this doesn't work all that well with the
|
|
673 |
# need to get line output too, so we check separately, and as we're
|
|
674 |
# going to buffer everything anyway, we check beforehand, which
|
|
675 |
# saves reading knit data over the wire when we know there are
|
|
676 |
# mising records.
|
|
677 |
text_nodes = set(text_nodes) |
|
678 |
present_text_keys = set(_node[1] for _node in text_nodes) |
|
|
2951.2.2
by Robert Collins
Factor out inventory text copying in Packer to a single helper method. |
679 |
missing_text_keys = set(self._text_filter) - present_text_keys |
|
2592.3.149
by Robert Collins
Unbreak pack to pack fetching properly, with missing-text detection really working. |
680 |
if missing_text_keys: |
681 |
# TODO: raise a specific error that can handle many missing
|
|
682 |
# keys.
|
|
683 |
a_missing_key = missing_text_keys.pop() |
|
684 |
raise errors.RevisionNotPresent(a_missing_key[1], |
|
685 |
a_missing_key[0]) |
|
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
686 |
# copy text keys and adjust values
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
687 |
self.pb.update("Copying content texts", 3) |
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
688 |
total_items, readv_group_iter = self._least_readv_node_readv(text_nodes) |
689 |
list(self._copy_nodes_graph(text_index_map, self.new_pack._writer, |
|
690 |
self.new_pack.text_index, readv_group_iter, total_items)) |
|
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
691 |
self._log_copied_texts() |
692 |
||
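# A rough sketch of the missing-text check above, with hypothetical keys.
# Text keys are (file_id, revision_id) tuples and _node[1] is the key of a
# node actually present in the source packs:
#
#     self._text_filter = [('f-id', 'rev-1'), ('f-id', 'rev-2')]
#     present_text_keys = set(node[1] for node in text_nodes)
#     # suppose only ('f-id', 'rev-1') is present:
#     missing_text_keys = set(self._text_filter) - present_text_keys
#     # -> set([('f-id', 'rev-2')]), so RevisionNotPresent('rev-2', 'f-id')
#     # is raised rather than writing a pack with dangling references.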
|
3035.2.6
by John Arbash Meinel
Suggested by Robert: Move the missing externals check into part of Packer.pack() |
693 |
def _check_references(self): |
694 |
"""Make sure our external refereneces are present.""" |
|
695 |
external_refs = self.new_pack._external_compression_parents_of_texts() |
|
696 |
if external_refs: |
|
697 |
index = self._pack_collection.text_index.combined_index |
|
698 |
found_items = list(index.iter_entries(external_refs)) |
|
699 |
if len(found_items) != len(external_refs): |
|
700 |
found_keys = set(k for idx, k, refs, value in found_items) |
|
701 |
missing_items = external_refs - found_keys |
|
702 |
missing_file_id, missing_revision_id = missing_items.pop() |
|
703 |
raise errors.RevisionNotPresent(missing_revision_id, |
|
704 |
missing_file_id) |
|
705 |
||
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
706 |
def _create_pack_from_packs(self): |
707 |
self.pb.update("Opening pack", 0, 5) |
|
708 |
self.new_pack = self.open_pack() |
|
709 |
new_pack = self.new_pack |
|
710 |
# buffer data - we won't be reading-back during the pack creation and
|
|
711 |
# this makes a significant difference on sftp pushes.
|
|
712 |
new_pack.set_write_cache_size(1024*1024) |
|
|
2592.3.234
by Martin Pool
Use -Dpack not -Dfetch for pack traces |
713 |
if 'pack' in debug.debug_flags: |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
714 |
plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name) |
715 |
for a_pack in self.packs] |
|
716 |
if self.revision_ids is not None: |
|
717 |
rev_count = len(self.revision_ids) |
|
718 |
else: |
|
719 |
rev_count = 'all' |
|
720 |
mutter('%s: create_pack: creating pack from source packs: ' |
|
721 |
'%s%s %s revisions wanted %s t=0', |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
722 |
time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name, |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
723 |
plain_pack_list, rev_count) |
724 |
self._copy_revision_texts() |
|
725 |
self._copy_inventory_texts() |
|
726 |
self._copy_text_texts() |
|
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
727 |
# select signature keys
|
|
2951.2.1
by Robert Collins
Factor out revision text copying in Packer to a single helper method. |
728 |
signature_filter = self._revision_keys # same keyspace |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
729 |
signature_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list( |
730 |
self.packs, 'signature_index')[0] |
|
731 |
signature_nodes = self._pack_collection._index_contents(signature_index_map, |
|
|
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
732 |
signature_filter) |
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
733 |
# copy signature keys and adjust values
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
734 |
self.pb.update("Copying signature texts", 4) |
|
2592.3.205
by Robert Collins
Move the pack ContainerWriter instance into NewPack. |
735 |
self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer, |
736 |
new_pack.signature_index) |
|
|
2592.3.234
by Martin Pool
Use -Dpack not -Dfetch for pack traces |
737 |
if 'pack' in debug.debug_flags: |
|
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
738 |
mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs', |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
739 |
time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name, |
|
2592.3.197
by Robert Collins
Hand over signature index creation to NewPack. |
740 |
new_pack.signature_index.key_count(), |
|
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
741 |
time.time() - new_pack.start_time) |
|
3035.2.6
by John Arbash Meinel
Suggested by Robert: Move the missing externals check into part of Packer.pack() |
742 |
self._check_references() |
|
2951.2.8
by Robert Collins
Test that reconciling a repository can be done twice in a row. |
743 |
if not self._use_pack(new_pack): |
|
2592.3.203
by Robert Collins
Teach NewPack how to buffer for pack operations. |
744 |
new_pack.abort() |
745 |
return None |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
746 |
self.pb.update("Finishing pack", 5) |
|
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
747 |
new_pack.finish() |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
748 |
self._pack_collection.allocate(new_pack) |
|
2592.3.206
by Robert Collins
Move pack rename-into-place into NewPack.finish and document hash-collision cases somewhat better. |
749 |
return new_pack |
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
750 |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
751 |
def _copy_nodes(self, nodes, index_map, writer, write_index): |
752 |
"""Copy knit nodes between packs with no graph references.""" |
|
753 |
pb = ui.ui_factory.nested_progress_bar() |
|
754 |
try: |
|
755 |
return self._do_copy_nodes(nodes, index_map, writer, |
|
756 |
write_index, pb) |
|
757 |
finally: |
|
758 |
pb.finished() |
|
759 |
||
760 |
def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb): |
|
761 |
# for record verification
|
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
762 |
knit = KnitVersionedFiles(None, None) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
763 |
# plan a readv on each source pack:
|
764 |
# group by pack
|
|
765 |
nodes = sorted(nodes) |
|
766 |
# how to map this into knit.py - or knit.py into this?
|
|
767 |
# we don't want the typical knit logic, we want grouping by pack
|
|
768 |
# at this point - perhaps a helper library for the following code
|
|
769 |
# duplication points?
|
|
770 |
request_groups = {} |
|
771 |
for index, key, value in nodes: |
|
772 |
if index not in request_groups: |
|
773 |
request_groups[index] = [] |
|
774 |
request_groups[index].append((key, value)) |
|
775 |
record_index = 0 |
|
776 |
pb.update("Copied record", record_index, len(nodes)) |
|
777 |
for index, items in request_groups.iteritems(): |
|
778 |
pack_readv_requests = [] |
|
779 |
for key, value in items: |
|
780 |
# ---- KnitGraphIndex.get_position
|
|
781 |
bits = value[1:].split(' ') |
|
782 |
offset, length = int(bits[0]), int(bits[1]) |
|
783 |
pack_readv_requests.append((offset, length, (key, value[0]))) |
|
784 |
# linear scan up the pack
|
|
785 |
pack_readv_requests.sort() |
|
786 |
# copy the data
|
|
787 |
transport, path = index_map[index] |
|
788 |
reader = pack.make_readv_reader(transport, path, |
|
789 |
[offset[0:2] for offset in pack_readv_requests]) |
|
790 |
for (names, read_func), (_1, _2, (key, eol_flag)) in \ |
|
791 |
izip(reader.iter_records(), pack_readv_requests): |
|
792 |
raw_data = read_func(None) |
|
793 |
# check the header only
|
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
794 |
df, _ = knit._parse_record_header(key, raw_data) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
795 |
df.close() |
796 |
pos, size = writer.add_bytes_record(raw_data, names) |
|
797 |
write_index.add_node(key, eol_flag + "%d %d" % (pos, size)) |
|
798 |
pb.update("Copied record", record_index) |
|
799 |
record_index += 1 |
|
800 |
||
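# The index value format consumed above is, roughly, a one-byte eol flag
# followed by 'offset length' within the source .pack file (hypothetical
# value shown):
#
#     value = 'N132 2074'
#     eol_flag = value[0]                          # 'N'
#     bits = value[1:].split(' ')                  # ['132', '2074']
#     offset, length = int(bits[0]), int(bits[1])  # 132, 2074
#
# Requests are grouped per source pack and sorted by offset, so each pack is
# read with a single forward-scanning readv rather than one request per key.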
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
801 |
def _copy_nodes_graph(self, index_map, writer, write_index, |
802 |
readv_group_iter, total_items, output_lines=False): |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
803 |
"""Copy knit nodes between packs. |
804 |
||
805 |
:param output_lines: Return lines present in the copied data as
|
|
|
2975.3.1
by Robert Collins
Change (without backwards compatibility) the |
806 |
an iterator of line,version_id.
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
807 |
"""
|
808 |
pb = ui.ui_factory.nested_progress_bar() |
|
809 |
try: |
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
810 |
for result in self._do_copy_nodes_graph(index_map, writer, |
811 |
write_index, output_lines, pb, readv_group_iter, total_items): |
|
|
3039.1.1
by Robert Collins
(robertc) Fix the text progress for pack to pack fetches. (Robert Collins). |
812 |
yield result |
|
3039.1.2
by Robert Collins
python2.4 'compatibility'. |
813 |
except Exception: |
|
3039.1.3
by Robert Collins
Document the try:except:else: rather than a finally: in pack_repo.._copy_nodes_graph. |
814 |
# Python 2.4 does not permit try:finally: in a generator.
|
|
3039.1.2
by Robert Collins
python2.4 'compatibility'. |
815 |
pb.finished() |
816 |
raise
|
|
817 |
else: |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
818 |
pb.finished() |
819 |
||
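# The try/except/else above stands in for try/finally, which Python 2.4
# rejects inside generators; the intended shape is equivalent to:
#
#     try:
#         for result in inner_generator:
#             yield result
#     finally:
#         pb.finished()
#
# with the progress bar finished explicitly on both the error path and the
# normal-completion path.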
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
820 |
def _do_copy_nodes_graph(self, index_map, writer, write_index, |
821 |
output_lines, pb, readv_group_iter, total_items): |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
822 |
# for record verification
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
823 |
knit = KnitVersionedFiles(None, None) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
824 |
# for line extraction when requested (inventories only)
|
825 |
if output_lines: |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
826 |
factory = KnitPlainFactory() |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
827 |
record_index = 0 |
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
828 |
pb.update("Copied record", record_index, total_items) |
829 |
for index, readv_vector, node_vector in readv_group_iter: |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
830 |
# copy the data
|
831 |
transport, path = index_map[index] |
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
832 |
reader = pack.make_readv_reader(transport, path, readv_vector) |
833 |
for (names, read_func), (key, eol_flag, references) in \ |
|
834 |
izip(reader.iter_records(), node_vector): |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
835 |
raw_data = read_func(None) |
836 |
if output_lines: |
|
837 |
# read the entire thing
|
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
838 |
content, _ = knit._parse_record(key[-1], raw_data) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
839 |
if len(references[-1]) == 0: |
840 |
line_iterator = factory.get_fulltext_content(content) |
|
841 |
else: |
|
842 |
line_iterator = factory.get_linedelta_content(content) |
|
843 |
for line in line_iterator: |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
844 |
yield line, key |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
845 |
else: |
846 |
# check the header only
|
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
847 |
df, _ = knit._parse_record_header(key, raw_data) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
848 |
df.close() |
849 |
pos, size = writer.add_bytes_record(raw_data, names) |
|
850 |
write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references) |
|
851 |
pb.update("Copied record", record_index) |
|
852 |
record_index += 1 |
|
853 |
||
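# Note: references[-1] above is the compression-parent reference list, so an
# empty list means the record was stored as a fulltext; otherwise it is a
# line delta and get_linedelta_content() is used to yield its lines (this
# line output is only requested for inventories).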
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
854 |
def _get_text_nodes(self): |
855 |
text_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list( |
|
856 |
self.packs, 'text_index')[0] |
|
857 |
return text_index_map, self._pack_collection._index_contents(text_index_map, |
|
858 |
self._text_filter) |
|
859 |
||
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
860 |
def _least_readv_node_readv(self, nodes): |
861 |
"""Generate request groups for nodes using the least readv's. |
|
862 |
|
|
863 |
:param nodes: An iterable of graph index nodes.
|
|
864 |
:return: Total node count and an iterator of the data needed to perform
|
|
865 |
readvs to obtain the data for nodes. Each item yielded by the
|
|
866 |
iterator is a tuple with:
|
|
867 |
index, readv_vector, node_vector. readv_vector is a list ready to
|
|
868 |
hand to the transport readv method, and node_vector is a list of
|
|
869 |
(key, eol_flag, references) for the node retrieved by the
|
|
870 |
matching readv_vector.
|
|
871 |
"""
|
|
872 |
# group by pack so we do one readv per pack
|
|
873 |
nodes = sorted(nodes) |
|
874 |
total = len(nodes) |
|
875 |
request_groups = {} |
|
876 |
for index, key, value, references in nodes: |
|
877 |
if index not in request_groups: |
|
878 |
request_groups[index] = [] |
|
879 |
request_groups[index].append((key, value, references)) |
|
880 |
result = [] |
|
881 |
for index, items in request_groups.iteritems(): |
|
882 |
pack_readv_requests = [] |
|
883 |
for key, value, references in items: |
|
884 |
# ---- KnitGraphIndex.get_position
|
|
885 |
bits = value[1:].split(' ') |
|
886 |
offset, length = int(bits[0]), int(bits[1]) |
|
887 |
pack_readv_requests.append( |
|
888 |
((offset, length), (key, value[0], references))) |
|
889 |
# linear scan up the pack to maximum range combining.
|
|
890 |
pack_readv_requests.sort() |
|
891 |
# split out the readv and the node data.
|
|
892 |
pack_readv = [readv for readv, node in pack_readv_requests] |
|
893 |
node_vector = [node for readv, node in pack_readv_requests] |
|
894 |
result.append((index, pack_readv, node_vector)) |
|
895 |
return total, result |
|
896 |
||
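# Illustrative result shape, with hypothetical offsets and two source packs:
#
#     nodes from pack index A: key k1 at (0, 100), key k2 at (100, 50)
#     nodes from pack index B: key k3 at (0, 200)
#     total == 3
#     result == [
#         (A, [(0, 100), (100, 50)], [(k1, flag, refs), (k2, flag, refs)]),
#         (B, [(0, 200)],            [(k3, flag, refs)]),
#     ]
#
# i.e. one readv per source pack, with offsets ascending within each pack.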
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
897 |
def _log_copied_texts(self): |
898 |
if 'pack' in debug.debug_flags: |
|
899 |
mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs', |
|
900 |
time.ctime(), self._pack_collection._upload_transport.base, |
|
901 |
self.new_pack.random_name, |
|
902 |
self.new_pack.text_index.key_count(), |
|
903 |
time.time() - self.new_pack.start_time) |
|
904 |
||
905 |
def _process_inventory_lines(self, inv_lines): |
|
906 |
"""Use up the inv_lines generator and setup a text key filter.""" |
|
907 |
repo = self._pack_collection.repo |
|
908 |
fileid_revisions = repo._find_file_ids_from_xml_inventory_lines( |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
909 |
inv_lines, self.revision_keys) |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
910 |
text_filter = [] |
911 |
for fileid, file_revids in fileid_revisions.iteritems(): |
|
912 |
text_filter.extend([(fileid, file_revid) for file_revid in file_revids]) |
|
913 |
self._text_filter = text_filter |
|
914 |
||
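# Sketch of the resulting filter, with hypothetical ids: if the copied
# inventories reference {'file-a': set(['rev-1', 'rev-2']), 'file-b':
# set(['rev-1'])}, then self._text_filter becomes
# [('file-a', 'rev-1'), ('file-a', 'rev-2'), ('file-b', 'rev-1')] - exactly
# the text keys _copy_text_texts is allowed (and required) to copy.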
|
3070.1.2
by John Arbash Meinel
Cleanup OptimizingPacker code according to my review feedback |
915 |
def _revision_node_readv(self, revision_nodes): |
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
916 |
"""Return the total revisions and the readv's to issue. |
917 |
||
918 |
:param revision_nodes: The revision index contents for the packs being
|
|
919 |
incorporated into the new pack.
|
|
920 |
:return: As per _least_readv_node_readv.
|
|
921 |
"""
|
|
922 |
return self._least_readv_node_readv(revision_nodes) |
|
923 |
||
|
2951.2.8
by Robert Collins
Test that reconciling a repository can be done twice in a row. |
924 |
def _use_pack(self, new_pack): |
925 |
"""Return True if new_pack should be used. |
|
926 |
||
927 |
:param new_pack: The pack that has just been created.
|
|
928 |
:return: True if the pack should be used.
|
|
929 |
"""
|
|
930 |
return new_pack.data_inserted() |
|
931 |
||
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
932 |
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
933 |
class OptimisingPacker(Packer): |
934 |
"""A packer which spends more time to create better disk layouts.""" |
|
935 |
||
|
3070.1.2
by John Arbash Meinel
Cleanup OptimizingPacker code according to my review feedback |
936 |
def _revision_node_readv(self, revision_nodes): |
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
937 |
"""Return the total revisions and the readv's to issue. |
938 |
||
939 |
This sort places revisions in topological order with the ancestors
|
|
940 |
after the children.
|
|
941 |
||
942 |
:param revision_nodes: The revision index contents for the packs being
|
|
943 |
incorporated into the new pack.
|
|
944 |
:return: As per _least_readv_node_readv.
|
|
945 |
"""
|
|
946 |
# build an ancestors dict
|
|
947 |
ancestors = {} |
|
948 |
by_key = {} |
|
949 |
for index, key, value, references in revision_nodes: |
|
950 |
ancestors[key] = references[0] |
|
951 |
by_key[key] = (index, value, references) |
|
952 |
order = tsort.topo_sort(ancestors) |
|
953 |
total = len(order) |
|
954 |
# Single IO is pathological, but it will work as a starting point.
|
|
955 |
requests = [] |
|
956 |
for key in reversed(order): |
|
957 |
index, value, references = by_key[key] |
|
958 |
# ---- KnitGraphIndex.get_position
|
|
959 |
bits = value[1:].split(' ') |
|
960 |
offset, length = int(bits[0]), int(bits[1]) |
|
961 |
requests.append( |
|
962 |
(index, [(offset, length)], [(key, value[0], references)])) |
|
963 |
# TODO: combine requests in the same index that are in ascending order.
|
|
964 |
return total, requests |
|
965 |
||
966 |
||
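# Ordering sketch: for a history A <- B <- C (C newest), ancestors maps each
# key to its parents, tsort.topo_sort() yields [A, B, C], and reversed(order)
# emits [C, B, A] - newest first. Each revision currently becomes its own
# single-offset request, hence the "single IO is pathological" remark above.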
|
2951.1.3
by Robert Collins
Partial support for native reconcile with packs. |
967 |
class ReconcilePacker(Packer): |
968 |
"""A packer which regenerates indices etc as it copies. |
|
969 |
|
|
970 |
This is used by ``bzr reconcile`` to cause parent text pointers to be
|
|
971 |
regenerated.
|
|
972 |
"""
|
|
973 |
||
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
974 |
def _extra_init(self): |
975 |
self._data_changed = False |
|
976 |
||
977 |
def _process_inventory_lines(self, inv_lines): |
|
978 |
"""Generate a text key reference map rather for reconciling with.""" |
|
979 |
repo = self._pack_collection.repo |
|
980 |
refs = repo._find_text_key_references_from_xml_inventory_lines( |
|
981 |
inv_lines) |
|
982 |
self._text_refs = refs |
|
983 |
# during reconcile we:
|
|
984 |
# - convert unreferenced texts to full texts
|
|
985 |
# - correct texts which reference a text not copied to be full texts
|
|
986 |
# - copy all others as-is but with corrected parents.
|
|
987 |
# - so at this point we don't know enough to decide what becomes a full
|
|
988 |
# text.
|
|
989 |
self._text_filter = None |
|
990 |
||
991 |
def _copy_text_texts(self): |
|
992 |
"""generate what texts we should have and then copy.""" |
|
993 |
self.pb.update("Copying content texts", 3) |
|
994 |
# we have three major tasks here:
|
|
995 |
# 1) generate the ideal index
|
|
996 |
repo = self._pack_collection.repo |
|
|
3063.2.1
by Robert Collins
Solve reconciling erroring when multiple portions of a single delta chain are being reinserted. |
997 |
ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for |
|
3063.2.2
by Robert Collins
Review feedback. |
998 |
_1, key, _2, refs in |
|
3063.2.1
by Robert Collins
Solve reconciling erroring when multiple portions of a single delta chain are being reinserted. |
999 |
self.new_pack.revision_index.iter_all_entries()]) |
1000 |
ideal_index = repo._generate_text_key_index(self._text_refs, ancestors) |
|
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1001 |
# 2) generate a text_nodes list that contains all the deltas that can
|
1002 |
# be used as-is, with corrected parents.
|
|
1003 |
ok_nodes = [] |
|
1004 |
bad_texts = [] |
|
1005 |
discarded_nodes = [] |
|
1006 |
NULL_REVISION = _mod_revision.NULL_REVISION |
|
1007 |
text_index_map, text_nodes = self._get_text_nodes() |
|
1008 |
for node in text_nodes: |
|
1009 |
# 0 - index
|
|
1010 |
# 1 - key
|
|
1011 |
# 2 - value
|
|
1012 |
# 3 - refs
|
|
1013 |
try: |
|
1014 |
ideal_parents = tuple(ideal_index[node[1]]) |
|
1015 |
except KeyError: |
|
1016 |
discarded_nodes.append(node) |
|
1017 |
self._data_changed = True |
|
1018 |
else: |
|
1019 |
if ideal_parents == (NULL_REVISION,): |
|
1020 |
ideal_parents = () |
|
1021 |
if ideal_parents == node[3][0]: |
|
1022 |
# no change needed.
|
|
1023 |
ok_nodes.append(node) |
|
1024 |
elif ideal_parents[0:1] == node[3][0][0:1]: |
|
1025 |
# the leftmost parent is the same, or there are no parents
|
|
1026 |
# today. Either way, we can preserve the representation as
|
|
1027 |
# long as we change the refs to be inserted.
|
|
1028 |
self._data_changed = True |
|
1029 |
ok_nodes.append((node[0], node[1], node[2], |
|
1030 |
(ideal_parents, node[3][1]))) |
|
1031 |
self._data_changed = True |
|
1032 |
else: |
|
1033 |
# Reinsert this text completely
|
|
1034 |
bad_texts.append((node[1], ideal_parents)) |
|
1035 |
self._data_changed = True |
|
1036 |
# we're finished with some data.
|
|
1037 |
del ideal_index |
|
1038 |
del text_nodes |
|
|
3063.2.2
by Robert Collins
Review feedback. |
1039 |
# 3) bulk copy the ok data
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1040 |
total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes) |
1041 |
list(self._copy_nodes_graph(text_index_map, self.new_pack._writer, |
|
1042 |
self.new_pack.text_index, readv_group_iter, total_items)) |
|
|
3063.2.1
by Robert Collins
Solve reconciling erroring when multiple portions of a single delta chain are being reinserted. |
1043 |
# 4) adhoc copy all the other texts.
|
1044 |
# We have to topologically insert all texts otherwise we can fail to
|
|
1045 |
# reconcile when parts of a single delta chain are preserved intact,
|
|
1046 |
# and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
|
|
1047 |
# reinserted, and if d3 has incorrect parents it will also be
|
|
1048 |
# reinserted. If we insert d3 first, d2 is present (as it was bulk
|
|
1049 |
# copied), so we will try to delta, but d2 is not currently able to be
|
|
1050 |
# extracted because its basis d1 is not present. Topologically sorting
|
|
1051 |
# addresses this. The following generates a sort for all the texts that
|
|
1052 |
# are being inserted without having to reference the entire text key
|
|
1053 |
# space (we only topo sort the revisions, which is smaller).
|
|
1054 |
topo_order = tsort.topo_sort(ancestors) |
|
1055 |
rev_order = dict(zip(topo_order, range(len(topo_order)))) |
|
1056 |
bad_texts.sort(key=lambda key:rev_order[key[0][1]]) |
|
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1057 |
transaction = repo.get_transaction() |
1058 |
file_id_index = GraphIndexPrefixAdapter( |
|
1059 |
self.new_pack.text_index, |
|
1060 |
('blank', ), 1, |
|
1061 |
add_nodes_callback=self.new_pack.text_index.add_nodes) |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1062 |
data_access = _DirectPackAccess( |
1063 |
{self.new_pack.text_index:self.new_pack.access_tuple()}) |
|
1064 |
data_access.set_writer(self.new_pack._writer, self.new_pack.text_index, |
|
1065 |
self.new_pack.access_tuple()) |
|
1066 |
output_texts = KnitVersionedFiles( |
|
1067 |
_KnitGraphIndex(self.new_pack.text_index, |
|
1068 |
add_callback=self.new_pack.text_index.add_nodes, |
|
1069 |
deltas=True, parents=True, is_locked=repo.is_locked), |
|
1070 |
data_access=data_access, max_delta_chain=200) |
|
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1071 |
for key, parent_keys in bad_texts: |
1072 |
# We refer to the new pack for the delta basis of data being output.
|
|
1073 |
# A possible improvement would be to catch errors on short reads
|
|
1074 |
# and only flush then.
|
|
1075 |
self.new_pack.flush() |
|
1076 |
parents = [] |
|
1077 |
for parent_key in parent_keys: |
|
1078 |
if parent_key[0] != key[0]: |
|
1079 |
# Graph parents must match the fileid
|
|
1080 |
raise errors.BzrError('Mismatched key parent %r:%r' % |
|
1081 |
(key, parent_keys)) |
|
1082 |
parents.append(parent_key[1]) |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1083 |
text_lines = split_lines(repo.texts.get_record_stream( |
1084 |
[key], 'unordered', True).next().get_bytes_as('fulltext')) |
|
1085 |
output_texts.add_lines(key, parent_keys, text_lines, |
|
1086 |
random_id=True, check_content=False) |
|
|
3063.2.2
by Robert Collins
Review feedback. |
1087 |
# 5) check that nothing inserted has a reference outside the keyspace.
|
|
3035.2.5
by John Arbash Meinel
Rename function to remove _new_ (per Robert's suggestion) |
1088 |
missing_text_keys = self.new_pack._external_compression_parents_of_texts() |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1089 |
if missing_text_keys: |
1090 |
raise errors.BzrError('Reference to missing compression parents %r' |
|
|
3376.2.12
by Martin Pool
pyflakes corrections (thanks spiv) |
1091 |
% (missing_text_keys,)) |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1092 |
self._log_copied_texts() |
1093 |
||
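# Classification recap for the loop above (per text node, checked against the
# ideal parent index):
#
#     key not in ideal_index           -> discarded_nodes (dropped entirely)
#     ideal parents == current parents -> ok_nodes, copied unchanged
#     leftmost parent unchanged        -> ok_nodes, delta kept, refs rewritten
#     anything else                    -> bad_texts, reinserted as a fulltext
#                                         via output_texts.add_lines()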
|
2951.2.8
by Robert Collins
Test that reconciling a repository can be done twice in a row. |
1094 |
def _use_pack(self, new_pack): |
1095 |
"""Override _use_pack to check for reconcile having changed content.""" |
|
1096 |
# XXX: we might be better off checking this at copy time.
|
|
1097 |
original_inventory_keys = set() |
|
1098 |
inv_index = self._pack_collection.inventory_index.combined_index |
|
1099 |
for entry in inv_index.iter_all_entries(): |
|
1100 |
original_inventory_keys.add(entry[1]) |
|
1101 |
new_inventory_keys = set() |
|
1102 |
for entry in new_pack.inventory_index.iter_all_entries(): |
|
1103 |
new_inventory_keys.add(entry[1]) |
|
1104 |
if new_inventory_keys != original_inventory_keys: |
|
1105 |
self._data_changed = True |
|
1106 |
return new_pack.data_inserted() and self._data_changed |
|
1107 |
||
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1108 |
|
1109 |
class RepositoryPackCollection(object): |
|
1110 |
"""Management of packs within a repository.""" |
|
1111 |
||
1112 |
def __init__(self, repo, transport, index_transport, upload_transport, |
|
1113 |
pack_transport): |
|
1114 |
"""Create a new RepositoryPackCollection. |
|
1115 |
||
1116 |
:param transport: Addresses the repository base directory
|
|
1117 |
(typically .bzr/repository/).
|
|
1118 |
:param index_transport: Addresses the directory containing indices.
|
|
1119 |
:param upload_transport: Addresses the directory into which packs are written
|
|
1120 |
while they're being created.
|
|
1121 |
:param pack_transport: Addresses the directory of existing complete packs.
|
|
1122 |
"""
|
|
1123 |
self.repo = repo |
|
1124 |
self.transport = transport |
|
1125 |
self._index_transport = index_transport |
|
1126 |
self._upload_transport = upload_transport |
|
1127 |
self._pack_transport = pack_transport |
|
1128 |
self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3} |
|
1129 |
self.packs = [] |
|
1130 |
# name:Pack mapping
|
|
1131 |
self._packs_by_name = {} |
|
1132 |
# the previous pack-names content
|
|
1133 |
self._packs_at_load = None |
|
1134 |
# when a pack is being created by this object, the state of that pack.
|
|
1135 |
self._new_pack = None |
|
1136 |
# aggregated revision index data
|
|
1137 |
self.revision_index = AggregateIndex() |
|
1138 |
self.inventory_index = AggregateIndex() |
|
1139 |
self.text_index = AggregateIndex() |
|
1140 |
self.signature_index = AggregateIndex() |
|
1141 |
||
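# _suffix_offsets maps each index suffix to its position in a pack's
# index_sizes tuple as recorded in pack-names, which is what
# _parse_index_sizes and _make_index rely on. E.g. (hypothetical pack name
# and sizes):
#
#     self._names['a1b2c3'] == (1024, 512, 4096, 128)
#     self._make_index('a1b2c3', '.tix')
#     # -> GraphIndex(self._index_transport, 'a1b2c3.tix', 4096)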
1142 |
def add_pack_to_memory(self, pack): |
|
1143 |
"""Make a Pack object available to the repository to satisfy queries. |
|
1144 |
|
|
1145 |
:param pack: A Pack object.
|
|
1146 |
"""
|
|
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
1147 |
if pack.name in self._packs_by_name: |
1148 |
raise AssertionError() |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1149 |
self.packs.append(pack) |
1150 |
self._packs_by_name[pack.name] = pack |
|
1151 |
self.revision_index.add_index(pack.revision_index, pack) |
|
1152 |
self.inventory_index.add_index(pack.inventory_index, pack) |
|
1153 |
self.text_index.add_index(pack.text_index, pack) |
|
1154 |
self.signature_index.add_index(pack.signature_index, pack) |
|
1155 |
||
1156 |
def all_packs(self): |
|
1157 |
"""Return a list of all the Pack objects this repository has. |
|
1158 |
||
1159 |
Note that an in-progress pack being created is not returned.
|
|
1160 |
||
1161 |
:return: A list of Pack objects for all the packs in the repository.
|
|
1162 |
"""
|
|
1163 |
result = [] |
|
1164 |
for name in self.names(): |
|
1165 |
result.append(self.get_pack_by_name(name)) |
|
1166 |
return result |
|
1167 |
||
1168 |
def autopack(self): |
|
1169 |
"""Pack the pack collection incrementally. |
|
1170 |
|
|
1171 |
This will not attempt global reorganisation or recompression,
|
|
1172 |
rather it will just ensure that the total number of packs does
|
|
1173 |
not grow without bound. It uses the _max_pack_count method to
|
|
1174 |
determine if autopacking is needed, and the pack_distribution
|
|
1175 |
method to determine the number of revisions in each pack.
|
|
1176 |
||
1177 |
If autopacking takes place then the packs name collection will have
|
|
1178 |
been flushed to disk - packing requires updating the name collection
|
|
1179 |
in synchronisation with certain steps. Otherwise the names collection
|
|
1180 |
is not flushed.
|
|
1181 |
||
1182 |
:return: True if packing took place.
|
|
1183 |
"""
|
|
1184 |
# XXX: Should not be needed when the management of indices is sane.
|
|
1185 |
total_revisions = self.revision_index.combined_index.key_count() |
|
1186 |
total_packs = len(self._names) |
|
1187 |
if self._max_pack_count(total_revisions) >= total_packs: |
|
1188 |
return False |
|
1189 |
# XXX: the following may want to be a class, to pack with a given
|
|
1190 |
# policy.
|
|
1191 |
mutter('Auto-packing repository %s, which has %d pack files, ' |
|
1192 |
'containing %d revisions into %d packs.', self, total_packs, |
|
1193 |
total_revisions, self._max_pack_count(total_revisions)) |
|
1194 |
# determine which packs need changing
|
|
1195 |
pack_distribution = self.pack_distribution(total_revisions) |
|
1196 |
existing_packs = [] |
|
1197 |
for pack in self.all_packs(): |
|
1198 |
revision_count = pack.get_revision_count() |
|
1199 |
if revision_count == 0: |
|
1200 |
# revision less packs are not generated by normal operation,
|
|
1201 |
# only by operations like sign-my-commits, and thus will not
|
|
1202 |
# tend to grow rapidly or without bound like commit containing
|
|
1203 |
# packs do - leave them alone as packing them really should
|
|
1204 |
# group their data with the relevant commit, and that may
|
|
1205 |
# involve rewriting ancient history - which autopack tries to
|
|
1206 |
# avoid. Alternatively we could not group the data but treat
|
|
1207 |
# each of these as having a single revision, and thus add
|
|
1208 |
# one revision for each to the total revision count, to get
|
|
1209 |
# a matching distribution.
|
|
1210 |
continue
|
|
1211 |
existing_packs.append((revision_count, pack)) |
|
1212 |
pack_operations = self.plan_autopack_combinations( |
|
1213 |
existing_packs, pack_distribution) |
|
1214 |
self._execute_pack_operations(pack_operations) |
|
1215 |
return True |
|
1216 |
||
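# See the worked examples after _max_pack_count and pack_distribution below:
# e.g. a repository with 2500 revisions is only autopacked once it holds more
# than 7 packs, and the combination plan then follows the
# [1000, 1000, 100, ...] distribution.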
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1217 |
def _execute_pack_operations(self, pack_operations, _packer_class=Packer): |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1218 |
"""Execute a series of pack operations. |
1219 |
||
1220 |
:param pack_operations: A list of [revision_count, packs_to_combine].
|
|
|
3070.1.2
by John Arbash Meinel
Cleanup OptimizingPacker code according to my review feedback |
1221 |
:param _packer_class: The class of packer to use (default: Packer).
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1222 |
:return: None.
|
1223 |
"""
|
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1224 |
for revision_count, packs in pack_operations: |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1225 |
# we may have no-ops from the setup logic
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1226 |
if len(packs) == 0: |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1227 |
continue
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1228 |
_packer_class(self, packs, '.autopack').pack() |
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1229 |
for pack in packs: |
|
2592.3.236
by Martin Pool
Make RepositoryPackCollection.remove_pack_from_memory private |
1230 |
self._remove_pack_from_memory(pack) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1231 |
# record the newly available packs and stop advertising the old
|
1232 |
# packs
|
|
|
2948.1.1
by Robert Collins
* Obsolete packs are now cleaned up by pack and autopack operations. |
1233 |
self._save_pack_names(clear_obsolete_packs=True) |
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1234 |
# Move the old packs out of the way now they are no longer referenced.
|
1235 |
for revision_count, packs in pack_operations: |
|
1236 |
self._obsolete_packs(packs) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1237 |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1238 |
def lock_names(self): |
1239 |
"""Acquire the mutex around the pack-names index. |
|
1240 |
|
|
1241 |
This cannot be used in the middle of a read-only transaction on the
|
|
1242 |
repository.
|
|
1243 |
"""
|
|
1244 |
self.repo.control_files.lock_write() |
|
1245 |
||
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1246 |
def pack(self): |
1247 |
"""Pack the pack collection totally.""" |
|
1248 |
self.ensure_loaded() |
|
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1249 |
total_packs = len(self._names) |
1250 |
if total_packs < 2: |
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1251 |
# This is arguably wrong because we might not be optimal, but for
|
1252 |
# now let's leave it in. (e.g. reconcile -> one pack. But not
|
|
1253 |
# optimal.)
|
|
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1254 |
return
|
1255 |
total_revisions = self.revision_index.combined_index.key_count() |
|
1256 |
# XXX: the following may want to be a class, to pack with a given
|
|
1257 |
# policy.
|
|
1258 |
mutter('Packing repository %s, which has %d pack files, ' |
|
1259 |
'containing %d revisions into 1 pack.', self, total_packs, |
|
1260 |
total_revisions) |
|
1261 |
# determine which packs need changing
|
|
1262 |
pack_distribution = [1] |
|
1263 |
pack_operations = [[0, []]] |
|
1264 |
for pack in self.all_packs(): |
|
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1265 |
pack_operations[-1][0] += pack.get_revision_count() |
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1266 |
pack_operations[-1][1].append(pack) |
|
3070.1.1
by Robert Collins
* ``bzr pack`` now orders revision texts in topological order, with newest |
1267 |
self._execute_pack_operations(pack_operations, OptimisingPacker) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1268 |
|
1269 |
def plan_autopack_combinations(self, existing_packs, pack_distribution): |
|
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1270 |
"""Plan a pack operation. |
1271 |
||
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1272 |
:param existing_packs: The packs to pack. (A list of (revcount, Pack)
|
1273 |
tuples).
|
|
|
2592.3.235
by Martin Pool
Review cleanups |
1274 |
:param pack_distribution: A list with the number of revisions desired
|
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1275 |
in each pack.
|
1276 |
"""
|
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1277 |
if len(existing_packs) <= len(pack_distribution): |
1278 |
return [] |
|
1279 |
existing_packs.sort(reverse=True) |
|
1280 |
pack_operations = [[0, []]] |
|
1281 |
# plan out what packs to keep, and what to reorganise
|
|
1282 |
while len(existing_packs): |
|
1283 |
# take the largest pack, and if it's less than the head of the
|
|
1284 |
# distribution chart we will include its contents in the new pack for
|
|
1285 |
# that position. If it's larger, we remove its size from the
|
|
1286 |
# distribution chart
|
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1287 |
next_pack_rev_count, next_pack = existing_packs.pop(0) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1288 |
if next_pack_rev_count >= pack_distribution[0]: |
1289 |
# this is already packed 'better' than the target bucket, so we can
|
|
1290 |
# not waste time packing it.
|
|
1291 |
while next_pack_rev_count > 0: |
|
1292 |
next_pack_rev_count -= pack_distribution[0] |
|
1293 |
if next_pack_rev_count >= 0: |
|
1294 |
# more to go
|
|
1295 |
del pack_distribution[0] |
|
1296 |
else: |
|
1297 |
# didn't use that entire bucket up
|
|
1298 |
pack_distribution[0] = -next_pack_rev_count |
|
1299 |
else: |
|
1300 |
# add the revisions we're going to add to the next output pack
|
|
1301 |
pack_operations[-1][0] += next_pack_rev_count |
|
1302 |
# allocate this pack to the next pack sub operation
|
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1303 |
pack_operations[-1][1].append(next_pack) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1304 |
if pack_operations[-1][0] >= pack_distribution[0]: |
1305 |
# this pack is used up, shift left.
|
|
1306 |
del pack_distribution[0] |
|
1307 |
pack_operations.append([0, []]) |
|
1308 |
||
1309 |
return pack_operations |
|
1310 |
||
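# Worked example (hypothetical pack sizes): with 13 revisions the
# distribution is [10, 1, 1, 1] and at most 4 packs are allowed. Given six
# existing packs sized [(4, A), (3, B), (2, C), (2, D), (1, E), (1, F)]:
#
#     A, B, C and D are accumulated until their total (11) passes the first
#     bucket of 10, producing the operation [11, [A, B, C, D]];
#     E and F already fit the remaining single-revision buckets and are left
#     untouched.
#
# The trailing empty [0, []] entry is skipped by _execute_pack_operations.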
1311 |
def ensure_loaded(self): |
|
|
2592.3.214
by Robert Collins
Merge bzr.dev. |
1312 |
# NB: if you see an assertion error here, it's probably access against
|
1313 |
# an unlocked repo. Naughty.
|
|
|
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1314 |
if not self.repo.is_locked(): |
1315 |
raise errors.ObjectNotLocked(self.repo) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1316 |
if self._names is None: |
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1317 |
self._names = {} |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1318 |
self._packs_at_load = set() |
1319 |
for index, key, value in self._iter_disk_pack_index(): |
|
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1320 |
name = key[0] |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1321 |
self._names[name] = self._parse_index_sizes(value) |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1322 |
self._packs_at_load.add((key, value)) |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1323 |
# populate all the metadata.
|
1324 |
self.all_packs() |
|
1325 |
||
1326 |
def _parse_index_sizes(self, value): |
|
1327 |
"""Parse a string of index sizes.""" |
|
1328 |
return tuple([int(digits) for digits in value.split(' ')]) |
|
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1329 |
|
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1330 |
def get_pack_by_name(self, name): |
1331 |
"""Get a Pack object by name. |
|
1332 |
||
1333 |
:param name: The name of the pack - e.g. '123456'
|
|
1334 |
:return: A Pack object.
|
|
1335 |
"""
|
|
1336 |
try: |
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1337 |
return self._packs_by_name[name] |
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1338 |
except KeyError: |
1339 |
rev_index = self._make_index(name, '.rix') |
|
1340 |
inv_index = self._make_index(name, '.iix') |
|
1341 |
txt_index = self._make_index(name, '.tix') |
|
1342 |
sig_index = self._make_index(name, '.six') |
|
|
2592.3.191
by Robert Collins
Give Pack responsibility for index naming, and two concrete classes - NewPack for new packs and ExistingPack for packs we read from disk. |
1343 |
result = ExistingPack(self._pack_transport, name, rev_index, |
1344 |
inv_index, txt_index, sig_index) |
|
|
2592.3.178
by Robert Collins
Add pack objects to the api for PackCollection.create_pack_from_packs. |
1345 |
self.add_pack_to_memory(result) |
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1346 |
return result |
1347 |
||
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1348 |
def allocate(self, a_new_pack): |
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1349 |
"""Allocate name in the list of packs. |
1350 |
||
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1351 |
:param a_new_pack: A NewPack instance to be added to the collection of
|
1352 |
packs for this repository.
|
|
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1353 |
"""
|
|
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
1354 |
self.ensure_loaded() |
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1355 |
if a_new_pack.name in self._names: |
|
2951.2.7
by Robert Collins
Raise an error on duplicate pack name allocation. |
1356 |
raise errors.BzrError( |
1357 |
'Pack %r already exists in %s' % (a_new_pack.name, self)) |
|
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1358 |
self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes) |
1359 |
self.add_pack_to_memory(a_new_pack) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1360 |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1361 |
def _iter_disk_pack_index(self): |
1362 |
"""Iterate over the contents of the pack-names index. |
|
1363 |
|
|
1364 |
This is used when loading the list from disk, and before writing to
|
|
1365 |
detect updates from others during our write operation.
|
|
1366 |
:return: An iterator of the index contents.
|
|
1367 |
"""
|
|
1368 |
return GraphIndex(self.transport, 'pack-names', None |
|
1369 |
).iter_all_entries() |
|
1370 |
||
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1371 |
def _make_index(self, name, suffix): |
1372 |
size_offset = self._suffix_offsets[suffix] |
|
1373 |
index_name = name + suffix |
|
1374 |
index_size = self._names[name][size_offset] |
|
1375 |
return GraphIndex( |
|
1376 |
self._index_transport, index_name, index_size) |
|
|
2592.5.5
by Martin Pool
Make RepositoryPackCollection remember the index transport, and responsible for getting a map of indexes |
1377 |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1378 |
def _max_pack_count(self, total_revisions): |
1379 |
"""Return the maximum number of packs to use for total revisions. |
|
1380 |
|
|
1381 |
:param total_revisions: The total number of revisions in the
|
|
1382 |
repository.
|
|
1383 |
"""
|
|
1384 |
if not total_revisions: |
|
1385 |
return 1 |
|
1386 |
digits = str(total_revisions) |
|
1387 |
result = 0 |
|
1388 |
for digit in digits: |
|
1389 |
result += int(digit) |
|
1390 |
return result |
|
1391 |
||
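# The bound is simply the sum of the decimal digits of the revision count,
# e.g. _max_pack_count(2500) == 2 + 5 + 0 + 0 == 7 and
# _max_pack_count(11) == 2, so the permitted pack count grows roughly
# logarithmically with history size.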
1392 |
def names(self): |
|
1393 |
"""Provide an order to the underlying names.""" |
|
|
2592.3.118
by Robert Collins
Record the size of the index files in the pack-names index. |
1394 |
return sorted(self._names.keys()) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1395 |
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1396 |
def _obsolete_packs(self, packs): |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1397 |
"""Move a number of packs which have been obsoleted out of the way. |
1398 |
||
1399 |
Each pack and its associated indices are moved out of the way.
|
|
1400 |
||
1401 |
Note: for correctness this function should only be called after a new
|
|
1402 |
pack names index has been written without these pack names, and with
|
|
1403 |
the names of packs that contain the data previously available via these
|
|
1404 |
packs.
|
|
1405 |
||
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1406 |
:param packs: The packs to obsolete.
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1407 |
:return: None.
|
1408 |
"""
|
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1409 |
for pack in packs: |
|
2592.3.200
by Robert Collins
Make NewPack reopen the index files, separating out the task of refreshing the index maps in the repository and managing the completion of writing a single pack to disk. |
1410 |
pack.pack_transport.rename(pack.file_name(), |
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1411 |
'../obsolete_packs/' + pack.file_name()) |
|
2592.3.226
by Martin Pool
formatting and docstrings |
1412 |
# TODO: Probably needs to know all possible indices for this pack
|
1413 |
# - or maybe list the directory and move all indices matching this
|
|
|
2592.5.13
by Martin Pool
Clean up duplicate index_transport variables |
1414 |
# name whether we recognize it or not?
|
|
2592.3.187
by Robert Collins
Finish cleaning up the packing logic to take Pack objects - all tests pass. |
1415 |
for suffix in ('.iix', '.six', '.tix', '.rix'): |
1416 |
self._index_transport.rename(pack.name + suffix, |
|
1417 |
'../obsolete_packs/' + pack.name + suffix) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1418 |
|
1419 |
def pack_distribution(self, total_revisions): |
|
1420 |
"""Generate a list of the number of revisions to put in each pack. |
|
1421 |
||
1422 |
:param total_revisions: The total number of revisions in the
|
|
1423 |
repository.
|
|
1424 |
"""
|
|
1425 |
if total_revisions == 0: |
|
1426 |
return [0] |
|
1427 |
digits = reversed(str(total_revisions)) |
|
1428 |
result = [] |
|
1429 |
for exponent, count in enumerate(digits): |
|
1430 |
size = 10 ** exponent |
|
1431 |
for pos in range(int(count)): |
|
1432 |
result.append(size) |
|
1433 |
return list(reversed(result)) |
|
1434 |
||
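# Each decimal digit of the revision count contributes that many packs of the
# matching power-of-ten size, largest first, e.g.:
#
#     pack_distribution(2500) == [1000, 1000, 100, 100, 100, 100, 100]
#
# which has exactly _max_pack_count(2500) == 7 entries.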
|
2592.5.12
by Martin Pool
Move pack_transport and pack_name onto RepositoryPackCollection |
1435 |
def _pack_tuple(self, name): |
1436 |
"""Return a tuple with the transport and file name for a pack name.""" |
|
1437 |
return self._pack_transport, name + '.pack' |
|
1438 |
||
|
2592.3.236
by Martin Pool
Make RepositoryPackCollection.remove_pack_from_memory private |
1439 |
def _remove_pack_from_memory(self, pack): |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1440 |
"""Remove pack from the packs accessed by this repository. |
1441 |
|
|
1442 |
Only affects memory state, until self._save_pack_names() is invoked.
|
|
1443 |
"""
|
|
1444 |
self._names.pop(pack.name) |
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1445 |
self._packs_by_name.pop(pack.name) |
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1446 |
self._remove_pack_indices(pack) |
1447 |
||
1448 |
def _remove_pack_indices(self, pack): |
|
1449 |
"""Remove the indices for pack from the aggregated indices.""" |
|
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1450 |
self.revision_index.remove_index(pack.revision_index, pack) |
|
2592.3.211
by Robert Collins
Pack inventory index management cleaned up. |
1451 |
self.inventory_index.remove_index(pack.inventory_index, pack) |
|
2592.3.212
by Robert Collins
Cleanup text index management in packs. |
1452 |
self.text_index.remove_index(pack.text_index, pack) |
|
2592.3.210
by Robert Collins
Signature index management looking sane for packs. |
1453 |
self.signature_index.remove_index(pack.signature_index, pack) |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1454 |
|
1455 |
def reset(self): |
|
|
2592.3.190
by Robert Collins
Move flush and reset operations to the pack collection rather than the thunk layers. |
1456 |
"""Clear all cached data.""" |
1457 |
# cached revision data
|
|
1458 |
self.repo._revision_knit = None |
|
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1459 |
self.revision_index.clear() |
|
2592.3.190
by Robert Collins
Move flush and reset operations to the pack collection rather than the thunk layers. |
1460 |
# cached signature data
|
1461 |
self.repo._signature_knit = None |
|
|
2592.3.210
by Robert Collins
Signature index management looking sane for packs. |
1462 |
self.signature_index.clear() |
|
2592.3.212
by Robert Collins
Cleanup text index management in packs. |
1463 |
# cached file text data
|
1464 |
self.text_index.clear() |
|
|
2592.3.190
by Robert Collins
Move flush and reset operations to the pack collection rather than the thunk layers. |
1465 |
self.repo._text_knit = None |
|
2592.3.211
by Robert Collins
Pack inventory index management cleaned up. |
1466 |
# cached inventory data
|
1467 |
self.inventory_index.clear() |
|
|
2592.3.192
by Robert Collins
Move new revision index management to NewPack. |
1468 |
# remove the open pack
|
1469 |
self._new_pack = None |
|
|
2592.3.190
by Robert Collins
Move flush and reset operations to the pack collection rather than the thunk layers. |
1470 |
# information about packs.
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1471 |
self._names = None |
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
1472 |
self.packs = [] |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1473 |
self._packs_by_name = {} |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1474 |
self._packs_at_load = None |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1475 |
|
|
2592.3.207
by Robert Collins
Start removing the dependency on RepositoryPackCollection._make_index_map. |
1476 |
def _make_index_map(self, index_suffix): |
|
2592.3.226
by Martin Pool
formatting and docstrings |
1477 |
"""Return information on existing indices. |
|
2592.3.207
by Robert Collins
Start removing the dependency on RepositoryPackCollection._make_index_map. |
1478 |
|
1479 |
:param index_suffix: Index suffix added to pack name.
|
|
1480 |
||
1481 |
:returns: (pack_map, indices) where indices is a list of GraphIndex
|
|
1482 |
objects, and pack_map is a mapping from those objects to the
|
|
1483 |
pack tuple they describe.
|
|
1484 |
"""
|
|
1485 |
# TODO: stop using this; it creates new indices unnecessarily.
|
|
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1486 |
self.ensure_loaded() |
|
2592.3.226
by Martin Pool
formatting and docstrings |
1487 |
suffix_map = {'.rix': 'revision_index', |
1488 |
'.six': 'signature_index', |
|
1489 |
'.iix': 'inventory_index', |
|
1490 |
'.tix': 'text_index', |
|
|
2592.3.207
by Robert Collins
Start removing the dependency on RepositoryPackCollection._make_index_map. |
1491 |
}
|
1492 |
return self._packs_list_to_pack_map_and_index_list(self.all_packs(), |
|
1493 |
suffix_map[index_suffix]) |
|
|
2592.5.15
by Martin Pool
Split out common code for making index maps |
1494 |
|
|
2592.3.179
by Robert Collins
Generate the revision_index_map for packing during the core operation, from the pack objects. |
1495 |
def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute): |
1496 |
"""Convert a list of packs to an index pack map and index list. |
|
1497 |
||
1498 |
:param packs: The packs list to process.
|
|
1499 |
:param index_attribute: The attribute that the desired index is found
|
|
1500 |
on.
|
|
1501 |
:return: A tuple (map, list) where map contains the dict from
|
|
1502 |
index:pack_tuple, and list contains the indices in the same order
|
|
1503 |
as the packs list.
|
|
1504 |
"""
|
|
1505 |
indices = [] |
|
1506 |
pack_map = {} |
|
1507 |
for pack in packs: |
|
1508 |
index = getattr(pack, index_attribute) |
|
1509 |
indices.append(index) |
|
|
2592.3.200
by Robert Collins
Make NewPack reopen the index files, separating out the task of refreshing the index maps in the repository and managing the completion of writing a single pack to disk. |
1510 |
pack_map[index] = (pack.pack_transport, pack.file_name()) |
|
2592.3.179
by Robert Collins
Generate the revision_index_map for packing during the core operation, from the pack objects. |
1511 |
return pack_map, indices |
1512 |
||
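For readers skimming the two helpers above: the returned shape is a (pack_map, indices) pair. A minimal stand-in sketch in plain Python; the transport strings and pack file names are invented, and object() stands in for real GraphIndex instances:

index_a = object()   # stands in for one pack's GraphIndex
index_b = object()   # stands in for another pack's GraphIndex

# pack_map: index object -> (pack_transport, pack file name)
pack_map = {
    index_a: ('<packs/ transport>', '1234abcd.pack'),
    index_b: ('<packs/ transport>', '5678ef01.pack'),
}
# indices: the same index objects, in the same order as the input packs list
indices = [index_a, index_b]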
|
2592.3.93
by Robert Collins
Steps toward filtering revisions/inventories/texts during fetch. |
1513 |
def _index_contents(self, pack_map, key_filter=None): |
1514 |
"""Get an iterable of the index contents from a pack_map. |
|
1515 |
||
1516 |
:param pack_map: A map from indices to pack details.
|
|
1517 |
:param key_filter: An optional filter to limit the
|
|
1518 |
keys returned.
|
|
1519 |
"""
|
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1520 |
indices = [index for index in pack_map.iterkeys()] |
1521 |
all_index = CombinedGraphIndex(indices) |
|
|
2592.3.93
by Robert Collins
Steps toward filtering revisions/inventories/texts during fetch. |
1522 |
if key_filter is None: |
1523 |
return all_index.iter_all_entries() |
|
1524 |
else: |
|
1525 |
return all_index.iter_entries(key_filter) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1526 |
|
|
2592.3.237
by Martin Pool
Rename RepositoryPackCollection.release_names to _unlock_names |
1527 |
def _unlock_names(self): |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1528 |
"""Release the mutex around the pack-names index.""" |
1529 |
self.repo.control_files.unlock() |
|
1530 |
||
|
2948.1.1
by Robert Collins
* Obsolete packs are now cleaned up by pack and autopack operations. |
1531 |
def _save_pack_names(self, clear_obsolete_packs=False): |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1532 |
"""Save the list of packs. |
1533 |
||
1534 |
This will take out the mutex around the pack names list for the
|
|
1535 |
duration of the method call. If concurrent updates have been made, a
|
|
1536 |
three-way merge between the list on disk and the current in-memory list
|
|
1537 |
is performed.
|
|
|
2948.1.1
by Robert Collins
* Obsolete packs are now cleaned up by pack and autopack operations. |
1538 |
|
1539 |
:param clear_obsolete_packs: If True, clear out the contents of the
|
|
1540 |
obsolete_packs directory.
|
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1541 |
"""
|
1542 |
self.lock_names() |
|
1543 |
try: |
|
1544 |
builder = GraphIndexBuilder() |
|
1545 |
# load the disk nodes across
|
|
1546 |
disk_nodes = set() |
|
1547 |
for index, key, value in self._iter_disk_pack_index(): |
|
1548 |
disk_nodes.add((key, value)) |
|
1549 |
# do a two-way diff against our original content
|
|
1550 |
current_nodes = set() |
|
1551 |
for name, sizes in self._names.iteritems(): |
|
1552 |
current_nodes.add( |
|
1553 |
((name, ), ' '.join(str(size) for size in sizes))) |
|
1554 |
deleted_nodes = self._packs_at_load - current_nodes |
|
1555 |
new_nodes = current_nodes - self._packs_at_load |
|
1556 |
disk_nodes.difference_update(deleted_nodes) |
|
1557 |
disk_nodes.update(new_nodes) |
|
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1558 |
# TODO: handle same-name, index-size-changes here -
|
1559 |
# e.g. use the value from disk, not ours, *unless* we're the one
|
|
1560 |
# changing it.
|
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1561 |
for key, value in disk_nodes: |
1562 |
builder.add_node(key, value) |
|
|
3010.1.11
by Robert Collins
Provide file modes to files created by pack repositories |
1563 |
self.transport.put_file('pack-names', builder.finish(), |
|
3416.2.2
by Martin Pool
Change some callers to get file and directory permissions from bzrdir not LockableFiles |
1564 |
mode=self.repo.bzrdir._get_file_mode()) |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1565 |
# move the baseline forward
|
1566 |
self._packs_at_load = disk_nodes |
|
|
2948.1.1
by Robert Collins
* Obsolete packs are now cleaned up by pack and autopack operations. |
1567 |
if clear_obsolete_packs: |
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
1568 |
self._clear_obsolete_packs() |
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1569 |
finally: |
|
2592.3.237
by Martin Pool
Rename RepositoryPackCollection.release_names to _unlock_names |
1570 |
self._unlock_names() |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1571 |
# synchronise the memory packs list with what we just wrote:
|
1572 |
new_names = dict(disk_nodes) |
|
1573 |
# drop no longer present nodes
|
|
1574 |
for pack in self.all_packs(): |
|
1575 |
if (pack.name,) not in new_names: |
|
|
2592.3.236
by Martin Pool
Make RepositoryPackCollection.remove_pack_from_memory private |
1576 |
self._remove_pack_from_memory(pack) |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1577 |
# add new nodes/refresh existing ones
|
1578 |
for key, value in disk_nodes: |
|
1579 |
name = key[0] |
|
1580 |
sizes = self._parse_index_sizes(value) |
|
1581 |
if name in self._names: |
|
1582 |
# existing
|
|
1583 |
if sizes != self._names[name]: |
|
1584 |
# the pack for name has had its indices replaced - rare but
|
|
1585 |
# important to handle. XXX: probably can never happen today
|
|
1586 |
# because the three-way merge code above does not handle it
|
|
1587 |
# - you may end up adding the same key twice to the new
|
|
1588 |
# disk index because the set values are the same, unless
|
|
1589 |
# the only index shows up as deleted by the set difference
|
|
1590 |
# - which it may. Until there is a specific test for this,
|
|
1591 |
# assume it's broken. RBC 20071017.
|
|
|
2592.3.236
by Martin Pool
Make RepositoryPackCollection.remove_pack_from_memory private |
1592 |
self._remove_pack_from_memory(self.get_pack_by_name(name)) |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1593 |
self._names[name] = sizes |
1594 |
self.get_pack_by_name(name) |
|
1595 |
else: |
|
1596 |
# new
|
|
1597 |
self._names[name] = sizes |
|
1598 |
self.get_pack_by_name(name) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1599 |
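The three-way merge described above is plain set arithmetic over (name, sizes) nodes. A self-contained sketch with invented pack names and sizes:

# Baseline read at lock time, our in-memory list, and the list now on disk
# (which a concurrent writer has extended with 'other').
at_load = set([(('old',), '10 10 10 10'), (('keep',), '20 20 20 20')])
current = set([(('keep',), '20 20 20 20'), (('new',), '30 30 30 30')])
disk = set([(('old',), '10 10 10 10'), (('keep',), '20 20 20 20'),
            (('other',), '40 40 40 40')])

deleted_nodes = at_load - current        # packs we removed: 'old'
new_nodes = current - at_load            # packs we added: 'new'
disk.difference_update(deleted_nodes)    # apply our deletions to the disk view
disk.update(new_nodes)                   # apply our additions

assert disk == set([(('keep',), '20 20 20 20'), (('new',), '30 30 30 30'),
                    (('other',), '40 40 40 40')])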
|
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
1600 |
def _clear_obsolete_packs(self): |
1601 |
"""Delete everything from the obsolete-packs directory. |
|
1602 |
"""
|
|
1603 |
obsolete_pack_transport = self.transport.clone('obsolete_packs') |
|
1604 |
for filename in obsolete_pack_transport.list_dir('.'): |
|
1605 |
try: |
|
1606 |
obsolete_pack_transport.delete(filename) |
|
1607 |
except (errors.PathError, errors.TransportError), e: |
|
1608 |
warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,)) |
|
1609 |
||
|
2592.3.202
by Robert Collins
Move write stream management into NewPack. |
1610 |
def _start_write_group(self): |
|
2592.3.190
by Robert Collins
Move flush and reset operations to the pack collection rather than the thunk layers. |
1611 |
# Do not permit preparation for writing if we're not in a 'write lock'.
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1612 |
if not self.repo.is_write_locked(): |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1613 |
raise errors.NotWriteLocked(self) |
|
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
1614 |
self._new_pack = NewPack(self._upload_transport, self._index_transport, |
|
3010.1.11
by Robert Collins
Provide file modes to files created by pack repositories |
1615 |
self._pack_transport, upload_suffix='.pack', |
|
3416.2.2
by Martin Pool
Change some callers to get file and directory permissions from bzrdir not LockableFiles |
1616 |
file_mode=self.repo.bzrdir._get_file_mode()) |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1617 |
# allow writing: queue writes to a new index
|
1618 |
self.revision_index.add_writable_index(self._new_pack.revision_index, |
|
1619 |
self._new_pack) |
|
|
2592.3.211
by Robert Collins
Pack inventory index management cleaned up. |
1620 |
self.inventory_index.add_writable_index(self._new_pack.inventory_index, |
1621 |
self._new_pack) |
|
|
2592.3.212
by Robert Collins
Cleanup text index management in packs. |
1622 |
self.text_index.add_writable_index(self._new_pack.text_index, |
1623 |
self._new_pack) |
|
|
2592.3.210
by Robert Collins
Signature index management looking sane for packs. |
1624 |
self.signature_index.add_writable_index(self._new_pack.signature_index, |
1625 |
self._new_pack) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1626 |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1627 |
self.repo.inventories._index._add_callback = self.inventory_index.add_callback |
1628 |
self.repo.revisions._index._add_callback = self.revision_index.add_callback |
|
1629 |
self.repo.signatures._index._add_callback = self.signature_index.add_callback |
|
1630 |
self.repo.texts._index._add_callback = self.text_index.add_callback |
|
|
2592.5.9
by Martin Pool
Move some more bits that seem to belong in RepositoryPackCollection into there |
1631 |
|
|
2592.5.8
by Martin Pool
Delegate abort_write_group to RepositoryPackCollection |
1632 |
def _abort_write_group(self): |
1633 |
# FIXME: just drop the transient index.
|
|
1634 |
# forget what names there are
|
|
|
3163.1.2
by Martin Pool
RepositoryPackCollection._abort_write_group should check it actually has a new pack before aborting (#180208) |
1635 |
if self._new_pack is not None: |
1636 |
self._new_pack.abort() |
|
1637 |
self._remove_pack_indices(self._new_pack) |
|
1638 |
self._new_pack = None |
|
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1639 |
self.repo._text_knit = None |
|
2592.5.6
by Martin Pool
Move pack repository start_write_group to pack collection object |
1640 |
|
|
2592.5.7
by Martin Pool
move commit_write_group to RepositoryPackCollection |
1641 |
def _commit_write_group(self): |
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1642 |
self._remove_pack_indices(self._new_pack) |
|
2592.3.198
by Robert Collins
Factor out data_inserted to reduce code duplication in detecting empty packs. |
1643 |
if self._new_pack.data_inserted(): |
|
2592.3.209
by Robert Collins
Revision index management looking sane for packs. |
1644 |
# get all the data to disk and read to use
|
|
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
1645 |
self._new_pack.finish() |
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1646 |
self.allocate(self._new_pack) |
|
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
1647 |
self._new_pack = None |
|
2592.5.7
by Martin Pool
move commit_write_group to RepositoryPackCollection |
1648 |
if not self.autopack(): |
|
2592.3.201
by Robert Collins
Cleanup RepositoryPackCollection.allocate. |
1649 |
# when autopack takes no steps, the names list is still
|
1650 |
# unsaved.
|
|
|
2592.5.10
by Martin Pool
Rename RepositoryPackCollection.save to _save_pack_names |
1651 |
self._save_pack_names() |
|
2592.5.7
by Martin Pool
move commit_write_group to RepositoryPackCollection |
1652 |
else: |
|
2592.3.202
by Robert Collins
Move write stream management into NewPack. |
1653 |
self._new_pack.abort() |
|
2951.1.1
by Robert Collins
(robertc) Fix data-refresh logic for packs not to refresh mid-transaction when a names write lock is held. (Robert Collins) |
1654 |
self._new_pack = None |
|
2592.3.213
by Robert Collins
Retain packs and indices in memory within a lock, even when write groups are entered and exited. |
1655 |
self.repo._text_knit = None |
|
2592.5.8
by Martin Pool
Delegate abort_write_group to RepositoryPackCollection |
1656 |
|
1657 |
||
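A hedged sketch of how the write-group plumbing above is normally driven through the public Repository API; 'repo_url' is a placeholder and error handling is cut down to the essentials:

from bzrlib.bzrdir import BzrDir

bzrdir = BzrDir.open('repo_url')     # placeholder URL
repo = bzrdir.open_repository()      # a KnitPackRepository for pack formats
repo.lock_write()
try:
    repo.start_write_group()         # reaches RepositoryPackCollection._start_write_group
    try:
        # ... insert texts / inventories / revisions here ...
        repo.commit_write_group()    # finishes the NewPack and may autopack
    except Exception:
        repo.abort_write_group()     # reaches _abort_write_group; the NewPack is dropped
        raise
finally:
    repo.unlock()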
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
1658 |
class KnitPackRepository(KnitRepository): |
|
3350.6.7
by Robert Collins
Review feedback, making things more clear, adding documentation on what is used where. |
1659 |
"""Repository with knit objects stored inside pack containers. |
1660 |
|
|
1661 |
The layering for a KnitPackRepository is:
|
|
1662 |
||
1663 |
Graph | HPSS | Repository public layer |
|
|
1664 |
===================================================
|
|
1665 |
Tuple based apis below, string based, and key based apis above
|
|
1666 |
---------------------------------------------------
|
|
1667 |
KnitVersionedFiles
|
|
1668 |
Provides .texts, .revisions etc
|
|
1669 |
This adapts the N-tuple keys to physical knit records which only have a
|
|
1670 |
single string identifier (for historical reasons), which in older formats
|
|
1671 |
was always the revision_id, and in the mapped code for packs is always
|
|
1672 |
the last element of key tuples.
|
|
1673 |
---------------------------------------------------
|
|
1674 |
GraphIndex
|
|
1675 |
A separate GraphIndex is used for each of the
|
|
1676 |
texts/inventories/revisions/signatures contained within each individual
|
|
1677 |
pack file. The GraphIndex layer works in N-tuples and is unaware of any
|
|
1678 |
semantic value.
|
|
1679 |
===================================================
|
|
1680 |
|
|
1681 |
"""
|
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1682 |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1683 |
def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class, |
1684 |
_serializer): |
|
1685 |
KnitRepository.__init__(self, _format, a_bzrdir, control_files, |
|
1686 |
_commit_builder_class, _serializer) |
|
|
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
1687 |
index_transport = self._transport.clone('indices') |
|
3350.6.5
by Robert Collins
Update to bzr.dev. |
1688 |
self._pack_collection = RepositoryPackCollection(self, self._transport, |
|
2592.5.11
by Martin Pool
Move upload_transport from pack repositories to the pack collection |
1689 |
index_transport, |
|
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
1690 |
self._transport.clone('upload'), |
1691 |
self._transport.clone('packs')) |
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1692 |
self.inventories = KnitVersionedFiles( |
1693 |
_KnitGraphIndex(self._pack_collection.inventory_index.combined_index, |
|
1694 |
add_callback=self._pack_collection.inventory_index.add_callback, |
|
1695 |
deltas=True, parents=True, is_locked=self.is_locked), |
|
1696 |
data_access=self._pack_collection.inventory_index.data_access, |
|
1697 |
max_delta_chain=200) |
|
1698 |
self.revisions = KnitVersionedFiles( |
|
1699 |
_KnitGraphIndex(self._pack_collection.revision_index.combined_index, |
|
1700 |
add_callback=self._pack_collection.revision_index.add_callback, |
|
1701 |
deltas=False, parents=True, is_locked=self.is_locked), |
|
1702 |
data_access=self._pack_collection.revision_index.data_access, |
|
1703 |
max_delta_chain=0) |
|
1704 |
self.signatures = KnitVersionedFiles( |
|
1705 |
_KnitGraphIndex(self._pack_collection.signature_index.combined_index, |
|
1706 |
add_callback=self._pack_collection.signature_index.add_callback, |
|
1707 |
deltas=False, parents=False, is_locked=self.is_locked), |
|
1708 |
data_access=self._pack_collection.signature_index.data_access, |
|
1709 |
max_delta_chain=0) |
|
1710 |
self.texts = KnitVersionedFiles( |
|
1711 |
_KnitGraphIndex(self._pack_collection.text_index.combined_index, |
|
1712 |
add_callback=self._pack_collection.text_index.add_callback, |
|
1713 |
deltas=True, parents=True, is_locked=self.is_locked), |
|
1714 |
data_access=self._pack_collection.text_index.data_access, |
|
1715 |
max_delta_chain=200) |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1716 |
# True when the repository object is 'write locked' (as opposed to the
|
1717 |
# physical lock, which is only taken out around changes to the pack-names list.)
|
|
1718 |
# Another way to represent this would be a decorator around the control
|
|
1719 |
# files object that presents logical locks as physical ones - if this
|
|
1720 |
# gets ugly consider that alternative design. RBC 20071011
|
|
1721 |
self._write_lock_count = 0 |
|
1722 |
self._transaction = None |
|
|
2592.3.96
by Robert Collins
Merge index improvements (includes bzr.dev). |
1723 |
# for tests
|
|
2951.1.3
by Robert Collins
Partial support for native reconcile with packs. |
1724 |
self._reconcile_does_inventory_gc = True |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1725 |
self._reconcile_fixes_text_parents = True |
|
2951.1.3
by Robert Collins
Partial support for native reconcile with packs. |
1726 |
self._reconcile_backsup_inventory = False |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1727 |
|
1728 |
def _abort_write_group(self): |
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1729 |
self._pack_collection._abort_write_group() |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1730 |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1731 |
def _find_inconsistent_revision_parents(self): |
1732 |
"""Find revisions with incorrectly cached parents. |
|
1733 |
||
1734 |
:returns: an iterable of tuples of (revision-id, parents-in-index,
|
|
1735 |
parents-in-revision).
|
|
1736 |
"""
|
|
|
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1737 |
if not self.is_locked(): |
1738 |
raise errors.ObjectNotLocked(self) |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1739 |
pb = ui.ui_factory.nested_progress_bar() |
|
2951.1.11
by Robert Collins
Do not try to use try:finally: around a yield for python 2.4. |
1740 |
result = [] |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1741 |
try: |
1742 |
revision_nodes = self._pack_collection.revision_index \ |
|
1743 |
.combined_index.iter_all_entries() |
|
1744 |
index_positions = [] |
|
1745 |
# Get the cached index values for all revisions, and also the location
|
|
1746 |
# in each index of the revision text so we can perform linear IO.
|
|
1747 |
for index, key, value, refs in revision_nodes: |
|
1748 |
pos, length = value[1:].split(' ') |
|
1749 |
index_positions.append((index, int(pos), key[0], |
|
1750 |
tuple(parent[0] for parent in refs[0]))) |
|
1751 |
pb.update("Reading revision index.", 0, 0) |
|
1752 |
index_positions.sort() |
|
|
2951.1.10
by Robert Collins
Peer review feedback with Ian. |
1753 |
batch_count = len(index_positions) / 1000 + 1 |
1754 |
pb.update("Checking cached revision graph.", 0, batch_count) |
|
1755 |
for offset in xrange(batch_count): |
|
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1756 |
pb.update("Checking cached revision graph.", offset) |
1757 |
to_query = index_positions[offset * 1000:(offset + 1) * 1000] |
|
1758 |
if not to_query: |
|
1759 |
break
|
|
1760 |
rev_ids = [item[2] for item in to_query] |
|
1761 |
revs = self.get_revisions(rev_ids) |
|
1762 |
for revision, item in zip(revs, to_query): |
|
1763 |
index_parents = item[3] |
|
1764 |
rev_parents = tuple(revision.parent_ids) |
|
1765 |
if index_parents != rev_parents: |
|
|
2951.1.11
by Robert Collins
Do not try to use try:finally: around a yield for python 2.4. |
1766 |
result.append((revision.revision_id, index_parents, rev_parents)) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1767 |
finally: |
1768 |
pb.finished() |
|
|
2951.1.11
by Robert Collins
Do not try to use try:finally: around a yield for python 2.4. |
1769 |
return result |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1770 |
|
|
3099.3.3
by John Arbash Meinel
Deprecate get_parents() in favor of get_parent_map() |
1771 |
@symbol_versioning.deprecated_method(symbol_versioning.one_one) |
|
2592.3.216
by Robert Collins
Implement get_parents and _make_parents_provider for Pack repositories. |
1772 |
def get_parents(self, revision_ids): |
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1773 |
"""See graph._StackedParentsProvider.get_parents.""" |
1774 |
parent_map = self.get_parent_map(revision_ids) |
|
1775 |
return [parent_map.get(r, None) for r in revision_ids] |
|
1776 |
||
1777 |
def get_parent_map(self, keys): |
|
1778 |
"""See graph._StackedParentsProvider.get_parent_map |
|
1779 |
||
|
2592.3.216
by Robert Collins
Implement get_parents and _make_parents_provider for Pack repositories. |
1780 |
This implementation accesses the combined revision index to provide
|
1781 |
answers.
|
|
1782 |
"""
|
|
|
2947.1.1
by Robert Collins
(robertc) Fix pack-repository to support get_parents calls as the first call on a repository, and fix full-branch push/pull performance to not suck terribly. (Robert Collins) |
1783 |
self._pack_collection.ensure_loaded() |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1784 |
index = self._pack_collection.revision_index.combined_index |
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1785 |
keys = set(keys) |
|
3373.5.2
by John Arbash Meinel
Add repository_implementation tests for get_parent_map |
1786 |
if None in keys: |
1787 |
raise ValueError('get_parent_map(None) is not valid') |
|
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1788 |
if _mod_revision.NULL_REVISION in keys: |
1789 |
keys.discard(_mod_revision.NULL_REVISION) |
|
|
3146.1.2
by Aaron Bentley
ParentsProviders now provide tuples of parents, never lists |
1790 |
found_parents = {_mod_revision.NULL_REVISION:()} |
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1791 |
else: |
1792 |
found_parents = {} |
|
1793 |
search_keys = set((revision_id,) for revision_id in keys) |
|
|
2592.3.216
by Robert Collins
Implement get_parents and _make_parents_provider for Pack repositories. |
1794 |
for index, key, value, refs in index.iter_entries(search_keys): |
1795 |
parents = refs[0] |
|
1796 |
if not parents: |
|
1797 |
parents = (_mod_revision.NULL_REVISION,) |
|
1798 |
else: |
|
1799 |
parents = tuple(parent[0] for parent in parents) |
|
1800 |
found_parents[key[0]] = parents |
|
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1801 |
return found_parents |
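A hedged usage sketch of this parents API, assuming repo is an open KnitPackRepository (as in the earlier write-group sketch); the revision ids are placeholders:

repo.lock_read()
try:
    parent_map = repo.get_parent_map(['rev-id-1', 'null:'])
    # Present revisions map to a tuple of parent ids; a revision with no
    # parents maps to ('null:',); 'null:' itself maps to (); revisions not
    # present in the repository are simply absent from the result.
finally:
    repo.unlock()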
|
2592.3.216
by Robert Collins
Implement get_parents and _make_parents_provider for Pack repositories. |
1802 |
|
1803 |
def _make_parents_provider(self): |
|
|
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
1804 |
return graph.CachingParentsProvider(self) |
|
2592.3.216
by Robert Collins
Implement get_parents and _make_parents_provider for Pack repositories. |
1805 |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1806 |
def _refresh_data(self): |
|
2951.1.1
by Robert Collins
(robertc) Fix data-refresh logic for packs not to refresh mid-transaction when a names write lock is held. (Robert Collins) |
1807 |
if self._write_lock_count == 1 or ( |
1808 |
self.control_files._lock_count == 1 and |
|
1809 |
self.control_files._lock_mode == 'r'): |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1810 |
# forget what names there are
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1811 |
self._pack_collection.reset() |
|
2592.3.219
by Robert Collins
Review feedback. |
1812 |
# XXX: Better to do an in-memory merge when acquiring a new lock -
|
1813 |
# factor out code from _save_pack_names.
|
|
|
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
1814 |
self._pack_collection.ensure_loaded() |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1815 |
|
1816 |
def _start_write_group(self): |
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1817 |
self._pack_collection._start_write_group() |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1818 |
|
1819 |
def _commit_write_group(self): |
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1820 |
return self._pack_collection._commit_write_group() |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1821 |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1822 |
def get_transaction(self): |
1823 |
if self._write_lock_count: |
|
1824 |
return self._transaction |
|
1825 |
else: |
|
1826 |
return self.control_files.get_transaction() |
|
1827 |
||
1828 |
def is_locked(self): |
|
1829 |
return self._write_lock_count or self.control_files.is_locked() |
|
1830 |
||
1831 |
def is_write_locked(self): |
|
1832 |
return self._write_lock_count |
|
1833 |
||
1834 |
def lock_write(self, token=None): |
|
1835 |
if not self._write_lock_count and self.is_locked(): |
|
1836 |
raise errors.ReadOnlyError(self) |
|
1837 |
self._write_lock_count += 1 |
|
1838 |
if self._write_lock_count == 1: |
|
1839 |
from bzrlib import transactions |
|
1840 |
self._transaction = transactions.WriteTransaction() |
|
1841 |
self._refresh_data() |
|
1842 |
||
1843 |
def lock_read(self): |
|
1844 |
if self._write_lock_count: |
|
1845 |
self._write_lock_count += 1 |
|
1846 |
else: |
|
1847 |
self.control_files.lock_read() |
|
1848 |
self._refresh_data() |
|
1849 |
||
1850 |
def leave_lock_in_place(self): |
|
1851 |
# not supported - raise an error
|
|
1852 |
raise NotImplementedError(self.leave_lock_in_place) |
|
1853 |
||
1854 |
def dont_leave_lock_in_place(self): |
|
1855 |
# not supported - raise an error
|
|
1856 |
raise NotImplementedError(self.dont_leave_lock_in_place) |
|
1857 |
||
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1858 |
@needs_write_lock
|
1859 |
def pack(self): |
|
1860 |
"""Compress the data within the repository. |
|
1861 |
||
1862 |
This will pack all the data to a single pack. In future it may
|
|
1863 |
recompress deltas or do other such expensive operations.
|
|
1864 |
"""
|
|
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1865 |
self._pack_collection.pack() |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1866 |
|
1867 |
@needs_write_lock
|
|
1868 |
def reconcile(self, other=None, thorough=False): |
|
1869 |
"""Reconcile this repository.""" |
|
1870 |
from bzrlib.reconcile import PackReconciler |
|
1871 |
reconciler = PackReconciler(self, thorough=thorough) |
|
1872 |
reconciler.reconcile() |
|
1873 |
return reconciler |
|
1874 |
||
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1875 |
def unlock(self): |
1876 |
if self._write_lock_count == 1 and self._write_group is not None: |
|
|
2592.3.244
by Martin Pool
unlock while in a write group now aborts the write group, unlocks, and errors. |
1877 |
self.abort_write_group() |
1878 |
self._transaction = None |
|
1879 |
self._write_lock_count = 0 |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1880 |
raise errors.BzrError( |
|
2592.3.244
by Martin Pool
unlock while in a write group now aborts the write group, unlocks, and errors. |
1881 |
'Must end write group before releasing write lock on %s' |
1882 |
% self) |
|
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1883 |
if self._write_lock_count: |
1884 |
self._write_lock_count -= 1 |
|
1885 |
if not self._write_lock_count: |
|
1886 |
transaction = self._transaction |
|
1887 |
self._transaction = None |
|
1888 |
transaction.finish() |
|
1889 |
else: |
|
1890 |
self.control_files.unlock() |
|
1891 |
||
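A short sketch of the logical write-lock counting implemented above, again assuming an open repo; purely illustrative:

repo.lock_write()    # count 0 -> 1: a WriteTransaction is created
repo.lock_write()    # count 1 -> 2: nested call, the same transaction is reused
repo.unlock()        # count 2 -> 1: lock still held
repo.unlock()        # count 1 -> 0: the transaction is finished
# Calling unlock() while a write group is still open aborts the group and
# raises BzrError instead.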
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1892 |
|
1893 |
class RepositoryFormatPack(MetaDirRepositoryFormat): |
|
1894 |
"""Format logic for pack structured repositories. |
|
1895 |
||
1896 |
This repository format has:
|
|
1897 |
- a list of packs in pack-names
|
|
1898 |
- packs in packs/NAME.pack
|
|
1899 |
- indices in indices/NAME.{iix,six,tix,rix}
|
|
1900 |
- knit deltas in the packs, knit indices mapped to the indices.
|
|
1901 |
- thunk objects to support the knits programming API.
|
|
1902 |
- a format marker of its own
|
|
1903 |
- an optional 'shared-storage' flag
|
|
1904 |
- an optional 'no-working-trees' flag
|
|
1905 |
- a LockDir lock
|
|
1906 |
"""
|
|
1907 |
||
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
1908 |
# Set this attribute in derived classes to control the repository class
|
1909 |
# created by open and initialize.
|
|
1910 |
repository_class = None |
|
1911 |
# Set this attribute in derived classes to control the
|
|
1912 |
# _commit_builder_class that the repository objects will have passed to
|
|
1913 |
# their constructor.
|
|
1914 |
_commit_builder_class = None |
|
1915 |
# Set this attribute in derived classes to control the _serializer that the
|
|
1916 |
# repository objects will have passed to their constructor.
|
|
1917 |
_serializer = None |
|
|
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
1918 |
# External references are not supported in pack repositories yet.
|
1919 |
supports_external_lookups = False |
|
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
1920 |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1921 |
def initialize(self, a_bzrdir, shared=False): |
1922 |
"""Create a pack based repository. |
|
1923 |
||
1924 |
:param a_bzrdir: bzrdir to contain the new repository; must already
|
|
1925 |
be initialized.
|
|
1926 |
:param shared: If true the repository will be initialized as a shared
|
|
1927 |
repository.
|
|
1928 |
"""
|
|
1929 |
mutter('creating repository in %s.', a_bzrdir.transport.base) |
|
1930 |
dirs = ['indices', 'obsolete_packs', 'packs', 'upload'] |
|
1931 |
builder = GraphIndexBuilder() |
|
1932 |
files = [('pack-names', builder.finish())] |
|
1933 |
utf8_files = [('format', self.get_format_string())] |
|
1934 |
||
1935 |
self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared) |
|
1936 |
return self.open(a_bzrdir=a_bzrdir, _found=True) |
|
1937 |
||
1938 |
def open(self, a_bzrdir, _found=False, _override_transport=None): |
|
1939 |
"""See RepositoryFormat.open(). |
|
1940 |
|
|
1941 |
:param _override_transport: INTERNAL USE ONLY. Allows opening the
|
|
1942 |
repository at a slightly different url
|
|
1943 |
than normal. I.e. during 'upgrade'.
|
|
1944 |
"""
|
|
1945 |
if not _found: |
|
1946 |
format = RepositoryFormat.find_format(a_bzrdir) |
|
1947 |
if _override_transport is not None: |
|
1948 |
repo_transport = _override_transport |
|
1949 |
else: |
|
1950 |
repo_transport = a_bzrdir.get_repository_transport(None) |
|
1951 |
control_files = lockable_files.LockableFiles(repo_transport, |
|
1952 |
'lock', lockdir.LockDir) |
|
1953 |
return self.repository_class(_format=self, |
|
1954 |
a_bzrdir=a_bzrdir, |
|
1955 |
control_files=control_files, |
|
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
1956 |
_commit_builder_class=self._commit_builder_class, |
1957 |
_serializer=self._serializer) |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1958 |
|
1959 |
||
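Taken together, the docstring and initialize() above imply an on-disk layout along these lines; the hash-style pack name is a made-up example:

.bzr/repository/
    format                      # format marker string
    lock/                       # LockDir
    pack-names                  # GraphIndex of live packs: (name,) -> index sizes
    packs/
        0123456789abcdef.pack
    indices/
        0123456789abcdef.rix    # revisions
        0123456789abcdef.iix    # inventories
        0123456789abcdef.tix    # texts
        0123456789abcdef.six    # signatures
    obsolete_packs/
    upload/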
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
1960 |
class RepositoryFormatKnitPack1(RepositoryFormatPack): |
|
3128.1.3
by Vincent Ladeuil
Since we are there s/parameteris.*/parameteriz&/. |
1961 |
"""A no-subtrees parameterized Pack repository. |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1962 |
|
|
2939.2.1
by Ian Clatworthy
use 'knitpack' naming instead of 'experimental' for pack formats |
1963 |
This format was introduced in 0.92.
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1964 |
"""
|
1965 |
||
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
1966 |
repository_class = KnitPackRepository |
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
1967 |
_commit_builder_class = PackCommitBuilder |
1968 |
_serializer = xml5.serializer_v5 |
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1969 |
|
1970 |
def _get_matching_bzrdir(self): |
|
|
3010.3.2
by Martin Pool
Rename pack0.92 to pack-0.92 |
1971 |
return bzrdir.format_registry.make_bzrdir('pack-0.92') |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1972 |
|
1973 |
def _ignore_setting_bzrdir(self, format): |
|
1974 |
pass
|
|
1975 |
||
1976 |
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) |
|
1977 |
||
1978 |
def get_format_string(self): |
|
1979 |
"""See RepositoryFormat.get_format_string().""" |
|
|
2939.2.6
by Ian Clatworthy
more review feedback from lifeless and poolie |
1980 |
return "Bazaar pack repository format 1 (needs bzr 0.92)\n" |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1981 |
|
1982 |
def get_format_description(self): |
|
1983 |
"""See RepositoryFormat.get_format_description().""" |
|
|
2939.2.1
by Ian Clatworthy
use 'knitpack' naming instead of 'experimental' for pack formats |
1984 |
return "Packs containing knits without subtree support" |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1985 |
|
1986 |
def check_conversion_target(self, target_format): |
|
1987 |
pass
|
|
1988 |
||
1989 |
||
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
1990 |
class RepositoryFormatKnitPack3(RepositoryFormatPack): |
|
3128.1.3
by Vincent Ladeuil
Since we are there s/parameteris.*/parameteriz&/. |
1991 |
"""A subtrees parameterized Pack repository. |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1992 |
|
|
2592.3.215
by Robert Collins
Review feedback. |
1993 |
This repository format uses the xml7 serializer to get:
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1994 |
- support for recording full info about the tree root
|
1995 |
- support for recording tree-references
|
|
|
2592.3.215
by Robert Collins
Review feedback. |
1996 |
|
|
2939.2.1
by Ian Clatworthy
use 'knitpack' naming instead of 'experimental' for pack formats |
1997 |
This format was introduced in 0.92.
|
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
1998 |
"""
|
1999 |
||
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
2000 |
repository_class = KnitPackRepository |
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
2001 |
_commit_builder_class = PackRootCommitBuilder |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2002 |
rich_root_data = True |
2003 |
supports_tree_reference = True |
|
|
2592.3.166
by Robert Collins
Merge KnitRepository3 removal branch. |
2004 |
_serializer = xml7.serializer_v7 |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2005 |
|
2006 |
def _get_matching_bzrdir(self): |
|
|
2939.2.5
by Ian Clatworthy
review feedback from lifeless |
2007 |
return bzrdir.format_registry.make_bzrdir( |
|
3010.3.2
by Martin Pool
Rename pack0.92 to pack-0.92 |
2008 |
'pack-0.92-subtree') |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2009 |
|
2010 |
def _ignore_setting_bzrdir(self, format): |
|
2011 |
pass
|
|
2012 |
||
2013 |
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) |
|
2014 |
||
2015 |
def check_conversion_target(self, target_format): |
|
2016 |
if not target_format.rich_root_data: |
|
2017 |
raise errors.BadConversionTarget( |
|
2018 |
'Does not support rich root data.', target_format) |
|
2019 |
if not getattr(target_format, 'supports_tree_reference', False): |
|
2020 |
raise errors.BadConversionTarget( |
|
2021 |
'Does not support nested trees', target_format) |
|
2022 |
||
2023 |
def get_format_string(self): |
|
2024 |
"""See RepositoryFormat.get_format_string().""" |
|
|
2939.2.6
by Ian Clatworthy
more review feedback from lifeless and poolie |
2025 |
return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n" |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2026 |
|
2027 |
def get_format_description(self): |
|
2028 |
"""See RepositoryFormat.get_format_description().""" |
|
|
2939.2.1
by Ian Clatworthy
use 'knitpack' naming instead of 'experimental' for pack formats |
2029 |
return "Packs containing knits with subtree support\n" |
|
2996.2.11
by Aaron Bentley
Implement rich-root-pack format ( #164639) |
2030 |
|
2031 |
||
2032 |
class RepositoryFormatKnitPack4(RepositoryFormatPack): |
|
|
3128.1.3
by Vincent Ladeuil
Since we are there s/parameteris.*/parameteriz&/. |
2033 |
"""A rich-root, no subtrees parameterized Pack repository. |
|
2996.2.11
by Aaron Bentley
Implement rich-root-pack format ( #164639) |
2034 |
|
|
2996.2.12
by Aaron Bentley
Text fixes from review |
2035 |
This repository format uses the xml6 serializer to get:
|
|
2996.2.11
by Aaron Bentley
Implement rich-root-pack format ( #164639) |
2036 |
- support for recording full info about the tree root
|
2037 |
||
|
2996.2.12
by Aaron Bentley
Text fixes from review |
2038 |
This format was introduced in 1.0.
|
|
2996.2.11
by Aaron Bentley
Implement rich-root-pack format ( #164639) |
2039 |
"""
|
2040 |
||
2041 |
repository_class = KnitPackRepository |
|
2042 |
_commit_builder_class = PackRootCommitBuilder |
|
2043 |
rich_root_data = True |
|
2044 |
supports_tree_reference = False |
|
2045 |
_serializer = xml6.serializer_v6 |
|
2046 |
||
2047 |
def _get_matching_bzrdir(self): |
|
2048 |
return bzrdir.format_registry.make_bzrdir( |
|
2049 |
'rich-root-pack') |
|
2050 |
||
2051 |
def _ignore_setting_bzrdir(self, format): |
|
2052 |
pass
|
|
2053 |
||
2054 |
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) |
|
2055 |
||
2056 |
def check_conversion_target(self, target_format): |
|
2057 |
if not target_format.rich_root_data: |
|
2058 |
raise errors.BadConversionTarget( |
|
2059 |
'Does not support rich root data.', target_format) |
|
2060 |
||
2061 |
def get_format_string(self): |
|
2062 |
"""See RepositoryFormat.get_format_string().""" |
|
2063 |
return ("Bazaar pack repository format 1 with rich root" |
|
2064 |
" (needs bzr 1.0)\n") |
|
2065 |
||
2066 |
def get_format_description(self): |
|
2067 |
"""See RepositoryFormat.get_format_description().""" |
|
2068 |
return "Packs containing knits with rich root support\n" |
|
|
3152.2.1
by Robert Collins
* A new repository format 'development' has been added. This format will |
2069 |
|
2070 |
||
2071 |
class RepositoryFormatPackDevelopment0(RepositoryFormatPack): |
|
2072 |
"""A no-subtrees development repository. |
|
2073 |
||
2074 |
This format should be retained until the second release after bzr 1.0.
|
|
2075 |
||
2076 |
No changes to the disk behaviour from pack-0.92.
|
|
2077 |
"""
|
|
2078 |
||
2079 |
repository_class = KnitPackRepository |
|
2080 |
_commit_builder_class = PackCommitBuilder |
|
2081 |
_serializer = xml5.serializer_v5 |
|
2082 |
||
2083 |
def _get_matching_bzrdir(self): |
|
2084 |
return bzrdir.format_registry.make_bzrdir('development0') |
|
2085 |
||
2086 |
def _ignore_setting_bzrdir(self, format): |
|
2087 |
pass
|
|
2088 |
||
2089 |
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) |
|
2090 |
||
2091 |
def get_format_string(self): |
|
2092 |
"""See RepositoryFormat.get_format_string().""" |
|
|
3152.2.3
by Robert Collins
Merge up with bzr.dev. |
2093 |
return "Bazaar development format 0 (needs bzr.dev from before 1.3)\n" |
|
3152.2.1
by Robert Collins
* A new repository format 'development' has been added. This format will |
2094 |
|
2095 |
def get_format_description(self): |
|
2096 |
"""See RepositoryFormat.get_format_description().""" |
|
2097 |
return ("Development repository format, currently the same as " |
|
2098 |
"pack-0.92\n") |
|
2099 |
||
2100 |
def check_conversion_target(self, target_format): |
|
2101 |
pass
|
|
2102 |
||
2103 |
||
2104 |
class RepositoryFormatPackDevelopment0Subtree(RepositoryFormatPack): |
|
2105 |
"""A subtrees development repository. |
|
2106 |
||
2107 |
This format should be retained until the second release after bzr 1.0.
|
|
2108 |
||
2109 |
No changes to the disk behaviour from pack-0.92-subtree.
|
|
2110 |
"""
|
|
2111 |
||
2112 |
repository_class = KnitPackRepository |
|
2113 |
_commit_builder_class = PackRootCommitBuilder |
|
2114 |
rich_root_data = True |
|
2115 |
supports_tree_reference = True |
|
2116 |
_serializer = xml7.serializer_v7 |
|
2117 |
||
2118 |
def _get_matching_bzrdir(self): |
|
2119 |
return bzrdir.format_registry.make_bzrdir( |
|
2120 |
'development0-subtree') |
|
2121 |
||
2122 |
def _ignore_setting_bzrdir(self, format): |
|
2123 |
pass
|
|
2124 |
||
2125 |
_matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir) |
|
2126 |
||
2127 |
def check_conversion_target(self, target_format): |
|
2128 |
if not target_format.rich_root_data: |
|
2129 |
raise errors.BadConversionTarget( |
|
2130 |
'Does not support rich root data.', target_format) |
|
2131 |
if not getattr(target_format, 'supports_tree_reference', False): |
|
2132 |
raise errors.BadConversionTarget( |
|
2133 |
'Does not support nested trees', target_format) |
|
2134 |
||
2135 |
def get_format_string(self): |
|
2136 |
"""See RepositoryFormat.get_format_string().""" |
|
2137 |
return ("Bazaar development format 0 with subtree support " |
|
|
3152.2.3
by Robert Collins
Merge up with bzr.dev. |
2138 |
"(needs bzr.dev from before 1.3)\n") |
|
3152.2.1
by Robert Collins
* A new repository format 'development' has been added. This format will |
2139 |
|
2140 |
def get_format_description(self): |
|
2141 |
"""See RepositoryFormat.get_format_description().""" |
|
2142 |
return ("Development repository format, currently the same as " |
|
2143 |
"pack-0.92-subtree\n") |
|
2144 |
||
2145 |