1"""
2Multi-archive support; convert policy and build queues to regular suites
4@contact: Debian FTP Master <ftpmaster@debian.org>
5@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
6@license: GNU General Public License version 2 or later
7"""
9# This program is free software; you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation; either version 2 of the License, or
12# (at your option) any later version.
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23################################################################################
25import psycopg2
26from daklib.dak_exceptions import DBUpdateError
27from daklib.config import Config
29import os
31################################################################################
34def _track_files_per_archive(cnf, c):
35 c.execute("SELECT id FROM archive")
36 (archive_id,) = c.fetchone()
38 if c.fetchone() is not None: 38 ↛ 39line 38 didn't jump to line 39, because the condition on line 38 was never true
39 raise DBUpdateError("Cannot automatically upgrade from installation with multiple archives.")
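    # A file may now belong to several archives and components, so record the
    # mapping in a separate table instead of the single files.location column
    # (dropped at the end of this function).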
    c.execute("""CREATE TABLE files_archive_map (
      file_id INT NOT NULL REFERENCES files(id),
      archive_id INT NOT NULL REFERENCES archive(id),
      component_id INT NOT NULL REFERENCES component(id),
      last_used TIMESTAMP DEFAULT NULL,
      created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
      PRIMARY KEY (file_id, archive_id, component_id)
    )""")
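    # Seed the map from the existing single-archive layout: every file belongs
    # to the one archive found above, in the component of its old location.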
    c.execute("""INSERT INTO files_archive_map (file_id, archive_id, component_id)
        SELECT f.id, %s, l.component
          FROM files f
          JOIN location l ON f.location = l.id""", (archive_id,))
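    # Strip the leading '<component>/' from filenames; the component is now
    # recorded in files_archive_map rather than in the path.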
    c.execute("""UPDATE files f SET filename = substring(f.filename FROM c.name || '/(.*)')
        FROM location l, component c
        WHERE f.location = l.id AND l.component = c.id
          AND f.filename LIKE c.name || '/%'""")

    # NOTE: The location table would need these changes, but we drop it later
    # anyway.
    # c.execute("""UPDATE location l SET path = path || c.name || '/'
    #     FROM component c
    #     WHERE l.component = c.id
    #       AND l.path NOT LIKE '%/' || c.name || '/'""")
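    # With the mapping kept in files_archive_map, the old binfiles view and
    # the location table are no longer needed.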
    c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch")
    c.execute("ALTER TABLE files DROP COLUMN location")
    c.execute("DROP TABLE location")


def _convert_policy_queues(cnf, c):
    base = cnf['Dir::Base']
    new_path = os.path.join(base, 'new')
    policy_path = os.path.join(base, 'policy')

    # Forget changes in (old) policy queues so they can be processed again.
    c.execute("DROP TABLE IF EXISTS build_queue_policy_files")
    c.execute("DROP TABLE IF EXISTS build_queue_files")
    c.execute("DROP TABLE IF EXISTS changes_pending_binaries")
    c.execute("DROP TABLE IF EXISTS changes_pending_source_files")
    c.execute("DROP TABLE IF EXISTS changes_pending_source")
    c.execute("DROP TABLE IF EXISTS changes_pending_files_map")
    c.execute("DROP TABLE IF EXISTS changes_pending_files")
    c.execute("DROP TABLE IF EXISTS changes_pool_files")
    c.execute("DELETE FROM changes WHERE in_queue IS NOT NULL")

    # newstage and unchecked are no longer queues
    c.execute("""
        DELETE FROM policy_queue
        WHERE queue_name IN ('newstage', 'unchecked')
    """)

    # Create archive for NEW
    c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue, mode) VALUES ('new', 'new queue', %s, 't', 'f', '0640') RETURNING (id)", (new_path,))
    (new_archive_id,) = c.fetchone()

    # Create archive for policy queues
    c.execute("INSERT INTO archive (name, description, path, use_morgue) VALUES ('policy', 'policy queues', %s, 'f') RETURNING (id)", (policy_path,))
    (archive_id,) = c.fetchone()

    # Add suites for policy queues
    c.execute("""
        INSERT INTO suite
            (archive_id, suite_name, origin, label, description, signingkeys)
        SELECT
            %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL])
        FROM policy_queue
        WHERE queue_name NOT IN ('unchecked')
    """, (archive_id,))

    # Move the NEW and BYHAND suites into their own archive.
    c.execute("UPDATE suite SET archive_id=%s WHERE suite_name IN ('byhand', 'new')", (new_archive_id,))
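    # Release metadata (origin, label, ...) now lives on the suite; link each
    # policy queue to its suite and drop the duplicated columns.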
    c.execute("""ALTER TABLE policy_queue
        DROP COLUMN origin,
        DROP COLUMN label,
        DROP COLUMN releasedescription,
        DROP COLUMN signingkey,
        DROP COLUMN stay_of_execution,
        DROP COLUMN perms,
        ADD COLUMN suite_id INT REFERENCES suite(id)
    """)

    c.execute("UPDATE policy_queue pq SET suite_id=s.id FROM suite s WHERE s.suite_name = pq.queue_name")
    c.execute("ALTER TABLE policy_queue ALTER COLUMN suite_id SET NOT NULL")

    c.execute("""INSERT INTO suite_architectures (suite, architecture)
        SELECT pq.suite_id, sa.architecture
          FROM policy_queue pq
          JOIN suite ON pq.id = suite.policy_queue_id
          JOIN suite_architectures sa ON suite.id = sa.suite
         WHERE pq.queue_name NOT IN ('byhand', 'new')
         GROUP BY pq.suite_id, sa.architecture""")

    # We take architectures from suite_architectures so that only arches
    # actually in use are added. The exact set of arches for policy queues is
    # not too important anyway, unless you want to generate Packages indices.
    c.execute("""INSERT INTO suite_architectures (suite, architecture)
        SELECT DISTINCT pq.suite_id, sa.architecture
          FROM policy_queue pq, suite_architectures sa
         WHERE pq.queue_name IN ('byhand', 'new')""")
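    # New tables describing uploads sitting in a policy queue: the upload
    # itself, the binaries it contains, and any byhand files.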
    c.execute("""CREATE TABLE policy_queue_upload (
      id SERIAL NOT NULL PRIMARY KEY,
      policy_queue_id INT NOT NULL REFERENCES policy_queue(id),
      target_suite_id INT NOT NULL REFERENCES suite(id),
      changes_id INT NOT NULL REFERENCES changes(id),
      source_id INT REFERENCES source(id),
      UNIQUE (policy_queue_id, target_suite_id, changes_id)
    )""")

    c.execute("""CREATE TABLE policy_queue_upload_binaries_map (
      policy_queue_upload_id INT REFERENCES policy_queue_upload(id) ON DELETE CASCADE,
      binary_id INT REFERENCES binaries(id),
      PRIMARY KEY (policy_queue_upload_id, binary_id)
    )""")

    c.execute("""CREATE TABLE policy_queue_byhand_file (
      id SERIAL NOT NULL PRIMARY KEY,
      upload_id INT NOT NULL REFERENCES policy_queue_upload(id),
      filename TEXT NOT NULL,
      processed BOOLEAN NOT NULL DEFAULT 'f'
    )""")
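    # Queue membership is now tracked via policy_queue_upload, so the old
    # queue columns on changes can go.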
    c.execute("""ALTER TABLE changes
        DROP COLUMN in_queue,
        DROP COLUMN approved_for
    """)


def _convert_build_queues(cnf, c):
    base = cnf['Dir::Base']
    build_queue_path = os.path.join(base, 'build-queues')

    c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue) VALUES ('build-queues', 'build queues', %s, 't', 'f') RETURNING id", [build_queue_path])
    (archive_id,) = c.fetchone()
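    # Each build queue becomes a suite in the new 'build-queues' archive,
    # carrying over its release metadata.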
    c.execute("ALTER TABLE build_queue ADD COLUMN suite_id INT REFERENCES suite(id)")

    c.execute("""
        INSERT INTO suite
            (archive_id, suite_name, origin, label, description, signingkeys, notautomatic)
        SELECT
            %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL]), notautomatic
        FROM build_queue
    """, [archive_id])
    c.execute("UPDATE build_queue bq SET suite_id=(SELECT id FROM suite s WHERE s.suite_name = bq.queue_name)")
    c.execute("ALTER TABLE build_queue ALTER COLUMN suite_id SET NOT NULL")
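    # Give each build-queue suite the architectures of the suites that copy
    # packages into it.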
    c.execute("""INSERT INTO suite_architectures (suite, architecture)
        SELECT bq.suite_id, sa.architecture
          FROM build_queue bq
          JOIN suite_build_queue_copy sbqc ON bq.id = sbqc.build_queue_id
          JOIN suite ON sbqc.suite = suite.id
          JOIN suite_architectures sa ON suite.id = sa.suite
         GROUP BY bq.suite_id, sa.architecture""")
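    # The path now lives on the archive and the release metadata on the suite,
    # so these build_queue columns are no longer needed.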
    c.execute("""ALTER TABLE build_queue
        DROP COLUMN path,
        DROP COLUMN copy_files,
        DROP COLUMN origin,
        DROP COLUMN label,
        DROP COLUMN releasedescription,
        DROP COLUMN signingkey,
        DROP COLUMN notautomatic""")


def do_update(self):
    print(__doc__)
    try:
        cnf = Config()
        if 'Dir::Base' not in cnf:
            print("""
MANUAL UPGRADE INSTRUCTIONS
===========================

This database update will convert policy and build queues to regular suites.
For these, archives will be created under Dir::Base:

  NEW:           <base>/new
  policy queues: <base>/policy
  build queues:  <base>/build-queues

Please add Dir::Base to dak.conf and try the update again. Once the database
upgrade is finished, you will have to reprocess all uploads currently in
policy queues: just move them back to unchecked manually.
""")
            raise DBUpdateError("Please update dak.conf and try again.")

        c = self.db.cursor()

        _track_files_per_archive(cnf, c)
        _convert_policy_queues(cnf, c)
        _convert_build_queues(cnf, c)
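        # Bump the schema revision so this update is recorded as applied.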
        c.execute("UPDATE config SET value = '75' WHERE name = 'db_revision'")
        self.db.commit()

    except psycopg2.ProgrammingError as msg:
        self.db.rollback()
        raise DBUpdateError('Unable to apply update 75, rollback issued. Error message: %s' % (str(msg)))