1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
|
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import hashlib
import shutil
import tempfile
import unittest
from canonical.database.sqlbase import flush_database_updates
from canonical.librarian import db
from canonical.librarian.storage import (
DigestMismatchError,
DuplicateFileIDError,
LibrarianStorage,
LibraryFileUpload,
)
from canonical.testing.layers import LaunchpadZopelessLayer
from lp.services.librarian.model import LibraryFileContent
class LibrarianStorageDBTests(unittest.TestCase):
    """Database-backed tests for LibrarianStorage.

    Exercises file addition, duplicate-content handling, digest
    verification, alias creation, and the client-provided-content-ID
    behavior specified by the LibrarianTransactions spec.
    """

    layer = LaunchpadZopelessLayer

    def setUp(self):
        # Switch to the librarian DB user so the storage layer has the
        # same permissions it would have in production.
        self.layer.switchDbUser('librarian')
        self.directory = tempfile.mkdtemp()
        self.storage = LibrarianStorage(self.directory, db.Library())

    def tearDown(self):
        # ignore_errors: the directory may already be partially removed
        # if a test failed mid-write.
        shutil.rmtree(self.directory, ignore_errors=True)

    def test_addFile(self):
        # Adding a file with a matching source digest stores it and
        # makes it visible via hasFile().
        data = 'data ' * 50
        digest = hashlib.sha1(data).hexdigest()
        newfile = self.storage.startAddFile('file1', len(data))
        newfile.srcDigest = digest
        newfile.append(data)
        fileid, aliasid = newfile.store()
        self.assertTrue(self.storage.hasFile(fileid))

    def test_addFiles_identical(self):
        # Start adding two files with identical data
        data = 'data ' * 5000
        newfile1 = self.storage.startAddFile('file1', len(data))
        newfile2 = self.storage.startAddFile('file2', len(data))
        newfile1.append(data)
        newfile2.append(data)
        id1, alias1 = newfile1.store()
        id2, alias2 = newfile2.store()

        # Make sure we actually got an id
        self.assertNotEqual(None, id1)
        self.assertNotEqual(None, id2)

        # But they are two different ids, because we leave duplicate
        # handling to the garbage collector
        self.assertNotEqual(id1, id2)

    def test_badDigest(self):
        # A mismatch between the client-supplied digest and the actual
        # content digest must abort the store.
        data = 'data ' * 50
        digest = 'crud'
        newfile = self.storage.startAddFile('file', len(data))
        newfile.srcDigest = digest
        newfile.append(data)
        self.assertRaises(DigestMismatchError, newfile.store)

    def test_alias(self):
        # Add a file (and so also add an alias)
        data = 'data ' * 50
        newfile = self.storage.startAddFile('file1', len(data))
        newfile.mimetype = 'text/unknown'
        newfile.append(data)
        fileid, aliasid = newfile.store()

        # Check that its alias has the right mimetype
        fa = self.storage.getFileAlias(aliasid, None, '/')
        self.assertEqual('text/unknown', fa.mimetype)

        # Re-add the same file, with the same name and mimetype...
        newfile2 = self.storage.startAddFile('file1', len(data))
        newfile2.mimetype = 'text/unknown'
        newfile2.append(data)
        fileid2, aliasid2 = newfile2.store()

        # Verify that we didn't get back the same alias ID
        self.assertNotEqual(
            fa.id, self.storage.getFileAlias(aliasid2, None, '/').id)

    def test_clientProvidedDuplicateIDs(self):
        # This test checks the new behaviour specified by
        # LibrarianTransactions spec: don't create IDs in DB, but do
        # check they don't exist.

        # Create a new file
        newfile = LibraryFileUpload(self.storage, 'filename', 0)

        # Set a content ID on the file (same as would happen with a
        # client-generated ID) and store it
        newfile.contentID = 666
        newfile.store()

        # Storing a second file with the same content ID must fail.
        newfile = LibraryFileUpload(self.storage, 'filename', 0)
        newfile.contentID = 666
        self.assertRaises(DuplicateFileIDError, newfile.store)

    def test_clientProvidedDuplicateContent(self):
        # Check the new behaviour specified by LibrarianTransactions
        # spec: allow duplicate content with distinct IDs.
        content = 'some content'

        # Store a file with id 6661
        newfile1 = LibraryFileUpload(self.storage, 'filename', 0)
        newfile1.contentID = 6661
        newfile1.append(content)
        fileid1, aliasid1 = newfile1.store()

        # Store second file identical to the first, with id 6662
        newfile2 = LibraryFileUpload(self.storage, 'filename', 0)
        newfile2.contentID = 6662
        newfile2.append(content)
        fileid2, aliasid2 = newfile2.store()

        # Create rows in the database for these files.
        LibraryFileContent(filesize=0, sha1='foo', md5='xx', id=6661)
        LibraryFileContent(filesize=0, sha1='foo', md5='xx', id=6662)
        flush_database_updates()
        # And no errors should have been raised!
|