1""" 

2Multi-archive support; convert policy and build queues to regular suites 

3 

4@contact: Debian FTP Master <ftpmaster@debian.org> 

5@copyright: 2012 Ansgar Burchardt <ansgar@debian.org> 

6@license: GNU General Public License version 2 or later 

7""" 

8 

9# This program is free software; you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation; either version 2 of the License, or 

12# (at your option) any later version. 

13 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18 

19# You should have received a copy of the GNU General Public License 

20# along with this program; if not, write to the Free Software 

21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 

22 

23################################################################################ 

24 

25import os 

26 

27import psycopg2 

28 

29from daklib.config import Config 

30from daklib.dak_exceptions import DBUpdateError 

31 

32################################################################################ 

33 

34 

def _track_files_per_archive(cnf, c):
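    """Introduce the files_archive_map table.

    Every file in the pool is mapped to an (archive, component) pair instead
    of a single location, and the now-redundant location table is dropped.
    Refuses to run if more than one archive already exists.
    """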

    c.execute("SELECT id FROM archive")
    (archive_id,) = c.fetchone()

    if c.fetchone() is not None:
        raise DBUpdateError(
            "Cannot automatically upgrade from installation with multiple archives."
        )

    c.execute(
        """CREATE TABLE files_archive_map (
               file_id INT NOT NULL REFERENCES files(id),
               archive_id INT NOT NULL REFERENCES archive(id),
               component_id INT NOT NULL REFERENCES component(id),
               last_used TIMESTAMP DEFAULT NULL,
               created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
               PRIMARY KEY (file_id, archive_id, component_id)
           )"""
    )

    c.execute(
        """INSERT INTO files_archive_map (file_id, archive_id, component_id)
           SELECT f.id, %s, l.component
           FROM files f
           JOIN location l ON f.location = l.id""",
        (archive_id,),
    )

    c.execute(
        """UPDATE files f SET filename = substring(f.filename FROM c.name || '/(.*)')
           FROM location l, component c
           WHERE f.location = l.id AND l.component = c.id
             AND f.filename LIKE c.name || '/%'"""
    )
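    # The leading component name is no longer stored as part of
    # files.filename; the component is tracked in files_archive_map instead.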

    # NOTE: The location table would need these changes, but we drop it later
    # anyway.
    # c.execute("""UPDATE location l SET path = path || c.name || '/'
    #                FROM component c
    #               WHERE l.component = c.id
    #                 AND l.path NOT LIKE '%/' || c.name || '/'""")

    c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch")
    c.execute("ALTER TABLE files DROP COLUMN location")
    c.execute("DROP TABLE location")


def _convert_policy_queues(cnf, c):
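    """Convert policy queues into regular suites.

    Pending uploads in the old queues are forgotten and have to be processed
    again. New 'new' and 'policy' archives are created under Dir::Base, and
    every remaining policy queue is linked to a suite of its own.
    """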

    base = cnf["Dir::Base"]
    new_path = os.path.join(base, "new")
    policy_path = os.path.join(base, "policy")

    # Forget changes in (old) policy queues so they can be processed again.
    c.execute("DROP TABLE IF EXISTS build_queue_policy_files")
    c.execute("DROP TABLE IF EXISTS build_queue_files")
    c.execute("DROP TABLE IF EXISTS changes_pending_binaries")
    c.execute("DROP TABLE IF EXISTS changes_pending_source_files")
    c.execute("DROP TABLE IF EXISTS changes_pending_source")
    c.execute("DROP TABLE IF EXISTS changes_pending_files_map")
    c.execute("DROP TABLE IF EXISTS changes_pending_files")
    c.execute("DROP TABLE IF EXISTS changes_pool_files")
    c.execute("DELETE FROM changes WHERE in_queue IS NOT NULL")

    # newstage and unchecked are no longer queues
    c.execute(
        """
        DELETE FROM policy_queue
        WHERE queue_name IN ('newstage', 'unchecked')
        """
    )

    # Create archive for NEW
    c.execute(
        "INSERT INTO archive (name, description, path, tainted, use_morgue, mode) VALUES ('new', 'new queue', %s, 't', 'f', '0640') RETURNING (id)",
        (new_path,),
    )
    (new_archive_id,) = c.fetchone()

    # Create archive for policy queues
    c.execute(
        "INSERT INTO archive (name, description, path, use_morgue) VALUES ('policy', 'policy queues', %s, 'f') RETURNING (id)",
        (policy_path,),
    )
    (archive_id,) = c.fetchone()

    # Add suites for policy queues
    c.execute(
        """
        INSERT INTO suite
          (archive_id, suite_name, origin, label, description, signingkeys)
        SELECT
          %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL])
        FROM policy_queue
        WHERE queue_name NOT IN ('unchecked')
        """,
        (archive_id,),
    )

    # move NEW to its own archive
    c.execute(
        "UPDATE suite SET archive_id=%s WHERE suite_name IN ('byhand', 'new')",
        (new_archive_id,),
    )

    c.execute(
        """ALTER TABLE policy_queue
           DROP COLUMN origin,
           DROP COLUMN label,
           DROP COLUMN releasedescription,
           DROP COLUMN signingkey,
           DROP COLUMN stay_of_execution,
           DROP COLUMN perms,
           ADD COLUMN suite_id INT REFERENCES suite(id)
        """
    )

    c.execute(
        "UPDATE policy_queue pq SET suite_id=s.id FROM suite s WHERE s.suite_name = pq.queue_name"
    )
    c.execute("ALTER TABLE policy_queue ALTER COLUMN suite_id SET NOT NULL")

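    # Policy-queue suites take over the architectures of the suites that
    # reference them via policy_queue_id.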
    c.execute(
        """INSERT INTO suite_architectures (suite, architecture)
           SELECT pq.suite_id, sa.architecture
           FROM policy_queue pq
           JOIN suite ON pq.id = suite.policy_queue_id
           JOIN suite_architectures sa ON suite.id = sa.suite
           WHERE pq.queue_name NOT IN ('byhand', 'new')
           GROUP BY pq.suite_id, sa.architecture"""
    )

    # Only architectures already present in suite_architectures are added, so
    # that we only pick up arches actually in use. Having exactly the right
    # set of arches for the policy queues is not too important anyway, unless
    # you want to generate Packages indices for them.
    c.execute(
        """INSERT INTO suite_architectures (suite, architecture)
           SELECT DISTINCT pq.suite_id, sa.architecture
           FROM policy_queue pq, suite_architectures sa
           WHERE pq.queue_name IN ('byhand', 'new')"""
    )

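    # New tables tracking uploads waiting in a policy queue (the old
    # changes_pending_* tables were dropped above).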
    c.execute(
        """CREATE TABLE policy_queue_upload (
               id SERIAL NOT NULL PRIMARY KEY,
               policy_queue_id INT NOT NULL REFERENCES policy_queue(id),
               target_suite_id INT NOT NULL REFERENCES suite(id),
               changes_id INT NOT NULL REFERENCES changes(id),
               source_id INT REFERENCES source(id),
               UNIQUE (policy_queue_id, target_suite_id, changes_id)
           )"""
    )

    c.execute(
        """CREATE TABLE policy_queue_upload_binaries_map (
               policy_queue_upload_id INT REFERENCES policy_queue_upload(id) ON DELETE CASCADE,
               binary_id INT REFERENCES binaries(id),
               PRIMARY KEY (policy_queue_upload_id, binary_id)
           )"""
    )

    c.execute(
        """CREATE TABLE policy_queue_byhand_file (
               id SERIAL NOT NULL PRIMARY KEY,
               upload_id INT NOT NULL REFERENCES policy_queue_upload(id),
               filename TEXT NOT NULL,
               processed BOOLEAN NOT NULL DEFAULT 'f'
           )"""
    )

    c.execute(
        """ALTER TABLE changes
           DROP COLUMN in_queue,
           DROP COLUMN approved_for
        """
    )


def _convert_build_queues(cnf, c):
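    """Convert build queues into regular suites.

    A 'build-queues' archive is created under Dir::Base, every build queue is
    linked to a suite of its own, and the columns that moved to the suite
    table (plus path and copy_files) are dropped from build_queue.
    """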

    base = cnf["Dir::Base"]
    build_queue_path = os.path.join(base, "build-queues")

    c.execute(
        "INSERT INTO archive (name, description, path, tainted, use_morgue) VALUES ('build-queues', 'build queues', %s, 't', 'f') RETURNING id",
        [build_queue_path],
    )
    (archive_id,) = c.fetchone()

    c.execute("ALTER TABLE build_queue ADD COLUMN suite_id INT REFERENCES suite(id)")

    c.execute(
        """
        INSERT INTO suite
          (archive_id, suite_name, origin, label, description, signingkeys, notautomatic)
        SELECT
          %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL]), notautomatic
        FROM build_queue
        """,
        [archive_id],
    )
    c.execute(
        "UPDATE build_queue bq SET suite_id=(SELECT id FROM suite s WHERE s.suite_name = bq.queue_name)"
    )
    c.execute("ALTER TABLE build_queue ALTER COLUMN suite_id SET NOT NULL")

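    # Build-queue suites take over the architectures of the suites that copy
    # packages to them (via suite_build_queue_copy).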
    c.execute(
        """INSERT INTO suite_architectures (suite, architecture)
           SELECT bq.suite_id, sa.architecture
           FROM build_queue bq
           JOIN suite_build_queue_copy sbqc ON bq.id = sbqc.build_queue_id
           JOIN suite ON sbqc.suite = suite.id
           JOIN suite_architectures sa ON suite.id = sa.suite
           GROUP BY bq.suite_id, sa.architecture"""
    )

    c.execute(
        """ALTER TABLE build_queue
           DROP COLUMN path,
           DROP COLUMN copy_files,
           DROP COLUMN origin,
           DROP COLUMN label,
           DROP COLUMN releasedescription,
           DROP COLUMN signingkey,
           DROP COLUMN notautomatic"""
    )


def do_update(self):
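    """Apply database update 75.

    Refuses to run unless Dir::Base is set in dak.conf. On failure the
    transaction is rolled back and a DBUpdateError is raised.
    """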

    print(__doc__)
    try:
        cnf = Config()
        if "Dir::Base" not in cnf:
            print(
                """
MANUAL UPGRADE INSTRUCTIONS
===========================

This database update will convert policy and build queues to regular suites.
For these, archives will be created under Dir::Base:

  NEW:           <base>/new
  policy queues: <base>/policy
  build queues:  <base>/build-queues

Please add Dir::Base to dak.conf and try the update again. Once the database
upgrade is finished, you will have to reprocess all uploads currently in
policy queues: just move them back to unchecked manually.
"""
            )
            raise DBUpdateError("Please update dak.conf and try again.")

        c = self.db.cursor()

        _track_files_per_archive(cnf, c)
        _convert_policy_queues(cnf, c)
        _convert_build_queues(cnf, c)

        c.execute("UPDATE config SET value = '75' WHERE name = 'db_revision'")
        self.db.commit()

    except psycopg2.ProgrammingError as msg:
        self.db.rollback()
        raise DBUpdateError(
            "Unable to apply update 75, rollback issued. Error message: %s"
            % (str(msg))
        )