Changesets can be listed by changeset number.
The Git repository is here.
- Revision:
- 264
- Log:
Updated to cvslog2web version 1.16. This includes changes made for the
ROOL version of the script which were submitted back and subsequently
merged into the official source tree. Meanwhile, the previously checked-
in configuration file was from the 'devel' site tree; changed to 'live'.
- Author:
- rool
- Date:
- Thu Apr 23 18:46:14 +0100 2009
- Size:
- 50323 Bytes
- Properties:
- Property svn:executable is set
1 | #!/bin/env python |
2 | |
3 | # cvslog2web by Ethan Tira-Thompson |
4 | # Released under the GPL (http://www.gnu.org/copyleft/gpl.html) |
5 | # $Date: 2009/01/06 19:59:07 $ |
6 | # Provides syndicated (Atom) and HTML output from CVS commit logs |
7 | SCRIPT_REVISION="$Revision: 1.16 $" |
8 | SCRIPT_URL="http://ethan.tira-thompson.com/cvslog2web" |
9 | |
10 | ################################################################################ |
11 | ################ INSTALLATION ################ |
12 | ################################################################################ |
13 | |
14 | # To install this script, copy it into the CVSROOT directory of your |
15 | # repository, and then add the following line to CVSROOT/loginfo: |
16 | # ALL python $CVSROOT/CVSROOT/cvslog2web.py $CVSROOT %{sVv} [config-file] |
17 | |
18 | # Don't forget you can replace 'ALL' with a filter to only apply |
19 | # cvslog2web to certain modules, or apply different copies of cvslog2web |
20 | # (presumably with different configuration settings) to different modules |
21 | |
22 | # The optional config file can hold the configuration parameters shown below |
23 | # This is convenient to use the same script with different settings for |
24 | # different modules (as opposed to copying the script itself) |
25 | |
26 | # You probably also received some stylesheet and images. The default |
27 | # placement for these files depends on the directory layout of your website: |
28 | # Atom.css -- same directory as the feed output (FEEDOUT) |
29 | # cvslog2web.css -- HTMLOUT_CSS and PERMALINK_CSS, default to same directory as |
30 | # the html page (HTMLOUT), and root of the permalink structure respectively |
31 | # nav_*.gif -- same directory as PERMALINK_CSS |
32 | # *.png -- Root of permalink structure (PERMALINK_URL_PREFIX) |
33 | |
34 | ################################################################################ |
35 | ################ CONFIGURATION ################ |
36 | ################################################################################ |
37 | |
38 | import os,sys |
39 | |
# Given the command line suggested above:
# arg 0 will be the script path (unused)
if len(sys.argv)>2:
    root=sys.argv[1] # arg 1 will be the repository root (CVSROOT)
    args=sys.argv[2].split(" ") # arg 2 is the directory of the commit followed by a list of files (space delimited)
# arg 3 is an optional configuration file parameter, which will override the defaults shown below
# arg 3 is handled at the end of the configuration section

#### These first three settings control the destination of the output ####
#### Set to None or empty string ("") to disable that format's generation ####

# This is the directory to hold entries as individual html files
# If PERMALINKDIR is disabled, cannot generate links from the feed and HTML
# to permanent log entries (but that's perfectly legal to do)
# PERMALINKDIR should essentially be the root of your webserver, can use
# PERMALINK_STRUCTURE setting (below) to subdivide files into subdirectories
PERMALINKDIR="/Users/ejt/Sites/"

# where to direct the Atom feed output, relative paths are interpreted from PERMALINKDIR
FEEDOUT="cvs.xml"

# will hold the most recent MAXHISTORY entries, relative paths are interpreted from PERMALINKDIR
# This is intended as a quick list of recent entries, suitable for including into a larger page via SSI or frames
HTMLOUT="recent.html"


# Default sets the tmp directory to be created in the same location as the script
# This directory will hold status files between commits
# Be sure to think through moving this outside CVSROOT (all committers need access to the same files)
TMPDIR=os.path.join(os.path.dirname(sys.argv[0]),"cvslog2web-tmp")

# maximum amount of time (seconds) in which checkins with the same message will be grouped together
TIMEOUT=15*60

# number of checkins to retain
# this will be reset to max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)
MAXHISTORY=0

# domain to use for entry id tags and default email addresses
DOMAIN="example.org"

# viewcvs integration -- links from output to diffs
# format has 4 fields available: filename, path, oldversion, newversion
DIFFLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s.diff?r1=%(oldversion)s&r2=%(newversion)s"

# For adds and removes, no diff is available, one of oldversion or newversion
# will be set (depending on add or remove), and the other will be an empty string
# Note how this default handles this by running them both together so it doesn't matter which is set
VIEWLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s?rev=%(oldversion)s%(newversion)s&content-type=text/vnd.viewcvs-markup"

# entry titles can be a file list or the first line of the commit message
TITLE_FILE_LIST, TITLE_MESSAGE_FIRST_LINE=range(2)
ENTRYTITLE=TITLE_MESSAGE_FIRST_LINE

# if using message as title (TITLE_MESSAGE_FIRST_LINE), can either
# repeat that line again in the content, or just skip it
REPEAT_ALWAYS, REPEAT_WHEN_MULTIPLE, REPEAT_NEVER=range(3)
REPEAT_FIRST_LINE=REPEAT_WHEN_MULTIPLE

# contact list, user names are looked up here, if not found, will fall
# back to information obtained from the system (password database and DOMAIN)
CONTACT={
    "ejt":("Ethan Tira-Thompson","ejt@cs.cmu.edu"),
    "guest":("Guest",None) #use None or empty string ('') to hide email
}

# Can drop the module name from paths in the file list
# Useful when you only have one module in the repository, or are filtering
# the input being passed to cvslog2web from the loginfo file
# Value is the depth to drop, so 0 drops nothing, 1 drops the first directory, etc.
DROP_MODULE=0

# controls whether feedback is given as each type of output is produced
VERBOSE=True


################ ATOM feed customization ################
FEEDTITLE="cvslog2web CVS" # title for the feed
FEEDDESCRIPTION="" # a short description of the feed
FEEDHOMELINK="http://www.example.org/" # a link to the "main" page
FEEDLOGO="" # spec says this image should be twice as wide as it is tall
FEEDICON="" # spec says this should be squared (as wide as tall)
FEEDENTRYPREFIX="Commit: " # prefix for entry titles in the feed output
# Controls generation of a list of <link rel="related"> tags for each file
# (not shown by many readers, and generally duplicates the file list in those that do.)
FEEDRELATED=False
FEED_MAXHISTORY=15

# FEEDSELFLINK *should* be set, but FEEDID *must* be set... if you don't
# provide FEEDSELFLINK, you will instead need to fill in FEEDID
# Further, the HTML index and permalink pages require the self link if you want them to contain links to the feed
# (why two separate settings? If you move the link, keep the link as the ID to maintain identity)
# These must be full, absolute URLs, not a relative path!
FEEDSELFLINK="http://www.example.org/cvs.xml" #self-link for the feed
FEEDID="" # a globally unique ID for the feed -- if empty, will be set to FEEDSELFLINK


################ HTML output customization ################
HTMLOUT_STANDALONE=True # whether to make the root element <html> to stand alone (vs. included via server-side include)
HTMLOUT_CSS="cvslog2web.css" # style sheet to include (only applies if HTML_STANDALONE is True)
HTMLTITLE="" # title for the html output, will fall back to FEEDTITLE if blank, use None to disable
HTMLOUT_ENTRYPREFIX="" # prefix for title lines; does *not* fall back on FEEDENTRYPREFIX
HTMLOUT_MESSAGE=True # set to False to skip the log body
HTMLOUT_FILELIST=True # set to False to skip the file list (also skipped if ENTRYTITLE==TITLE_FILE_LIST )
HTMLOUT_MAXHISTORY=10
# Prefix author list with the given string in a nested SPAN of class
# 'cvslog2web_authorsprefix' and don't use parenthesis around names.
# Works best if CSS drops Authors span onto separate line, e.g.
# by setting 'display: block'. If the string is empty, uses parenthesis
# around the comma-separated names with no prefix span.
HTMLOUT_AUTHORSPREFIX=""
# HTMLOUT_ORDER allows you to define the order of items in the summary
HO_TITLE,HO_AUTHORS,HO_TIMESTAMP,HO_MESSAGE,HO_FILELIST=range(5)
HTMLOUT_ORDER=[HO_TITLE,HO_AUTHORS,HO_MESSAGE,HO_FILELIST]
# Timestamp format if the HO_TIMESTAMP section is included (see above)
# according to 'strftime'. A space and a timezone indication is always
# appended to the string regardless of format; avoid '%Z' in the string.
HTMLOUT_TIMESTAMPFORMAT="%a. %B %d, %Y at %I:%M:%S %p"
# Optional prefix string, under class 'cvslog2web_timestampprefix' within
# the timestamp DIV.
HTMLOUT_TIMESTAMPPREFIX="Committed "
# Outer encapsulating DIV class for each entry in the list
HTMLOUT_OUTERCLASS="cvslog2web_entry"

# This will be used as the 'target' attribute for all links on the page.
# (Handy if using the HTML output within a frame on your site, and you want the
# diffs to load in another frame.) Ignored if empty, otherwise specify a frame
# name from your site or one of the keywords _self, _parent, _top, or _blank.
HTMLOUT_TARGET=""

################ permanent link pages customization ################
# Prefix for permalink URLs (structure/filename will be appended)
# Can be blank to use relative URLs from the feed/HTML pages
# Put any directory structure in PERMALINK_STRUCTURE, not here
# Only fill this in if you are specifying an absolute URL (i.e. starts with http://)
PERMALINK_URL_PREFIX=""
# Prefix for images; normally they reside in the same place as the permalinks
PERMALINK_IMG_PREFIX="" # if empty string, will fall back to PERMALINK_URL_PREFIX, use None to disable
# strftime format string for permalink files -- can spread among subdirs with '/'
# In a post-processing stage, microseconds are available via '%%(us)d' (with normal printf-style formatting, e.g. %%(us)06d)
PERMALINK_STRUCTURE="commits/%Y/%m/commit-%d-%H-%M-%S-%%(us)06d.html"
# style sheet, will fall back to PERMALINK_URL_PREFIX+HTMLOUT_CSS if blank, use None to disable
PERMALINK_CSS=""
# text to use in the prev/next buttons
PL_PREVTEXT="PREV"
PL_NEXTTEXT="NEXT"
# Mostly for debugging, causes all permalink pages in history to be regenerated
# Default causes full rebuild if the script is run directly (without cvs-provided arguments)
REBUILDPERMALINKS=len(sys.argv)<3

# PERMALINK_ORDER allows you to define the order of items on permalink pages
PL_AUTHORS,PL_TIMESTAMP,PL_MESSAGE,PL_FILELIST,PL_PREVLINK,PL_NEXTLINK,PL_FEEDLINK=range(7)
PERMALINK_ORDER=[PL_TIMESTAMP,PL_PREVLINK,PL_NEXTLINK,PL_AUTHORS,PL_MESSAGE,PL_FILELIST,PL_FEEDLINK]
# But check this out: you can also include an xhtml string to be written at
# any point in the page! If the string begins with '<' it is parsed as xml.
# If it starts with any other character it is escaped as plain text. It is
# not possible to have incomplete tags which span built-in elements.
PERMALINK_ORDER.insert(0,"<h1><a href=\""+FEEDHOMELINK+"\">Visit Project Homepage</a></h1>")
# As with HTMLOUT_STANDALONE - should permalink pages be full HTML documents
# or just fragments for inclusion in a wider page template?
PERMALINK_STANDALONE=True
# No JavaScript in Permalink pages - standard HTML links cover most cases but
# an onClick attribute directing the window location to the same URL is added
# for "belt and braces", unless overridden.
PERMALINK_ADDJS=True
# Outer encapsulating class for each permalink file's main content
PERMALINK_OUTERCLASS="cvslog2web_entry"
# If defined as an array of three strings, each string is taken to be the URL
# of an image to use in place of the "A" (first array entry), "M" (second array
# entry) and "R" (third and last array entry) letters, which indicate where files
# are added, modified or removed respectively in permalink pages. If an empty
# array, the letters are used.
PERMALINK_STATUSICONS=[]

################ LOAD EXTERNAL ################
# load overrides from optional external configuration file (if specified)
# The config file is plain Python executed in this module's namespace, so it
# can override any of the settings above.
try:
    config_file=None
    if len(sys.argv)>3:
        config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[3])
    if len(sys.argv)==2: # running outside loginfo, just regenerate
        config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[1])
        root=""
        args=[]
    if config_file:
        if os.path.isfile(config_file):
            execfile(config_file)
        else:
            # NOTE(review): on Python 2 the bare 'except' below also catches
            # SystemExit, so this exit is reported and execution continues
            # with defaults -- confirm that is the intended behavior
            sys.exit("cvslog2web could not find configuration file "+config_file)
except:
    import traceback
    print "The following exception occurred while processing external configuration file:"
    traceback.print_exc()
    print "Execution will continue with default settings"

# Resolve fall-back/defaulting relationships between settings (documented above)
if not FEEDID:
    FEEDID=FEEDSELFLINK
if HTMLTITLE=="":
    HTMLTITLE=FEEDTITLE
if PERMALINK_IMG_PREFIX=="":
    PERMALINK_IMG_PREFIX=PERMALINK_URL_PREFIX
elif not PERMALINK_IMG_PREFIX:
    # None was requested: normalize to empty string for concatenation later
    PERMALINK_IMG_PREFIX="";
if PERMALINK_CSS=="":
    if PERMALINK_URL_PREFIX:
        PERMALINK_CSS=PERMALINK_URL_PREFIX+HTMLOUT_CSS
    else:
        # relative link: climb out of the permalink subdirectories back to PERMALINKDIR
        PERMALINK_CSS="../"*PERMALINK_STRUCTURE.count("/")+HTMLOUT_CSS
# if these are already set to absolute paths, then these are no-ops
HTMLOUT=os.path.join(PERMALINKDIR,HTMLOUT)
FEEDOUT=os.path.join(PERMALINKDIR,FEEDOUT)


################################################################################
################ INPUT PARSING ################
################################################################################
# You don't want to change much below here...
# It's icky code from here on out.

import re, pickle, time, datetime, pwd
import xml.dom, xml.dom.minidom

# history must be long enough to serve the largest of the three outputs
MAXHISTORY=max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)

# single timestamp for this run, formatted as an RFC-3339/Atom UTC string
# (strftime has no microsecond field, hence the two-stage %%06d formatting)
curtime=datetime.datetime.utcnow()
curtime_str=curtime.strftime("%Y-%m-%dT%H:%M:%S.%%06d+00:00") % curtime.microsecond

# make sure the status directory exists (and is actually a directory)
if os.path.exists(TMPDIR):
    if not os.path.isdir(TMPDIR): sys.exit("cvslog2web: file blocking TMPDIR "+TMPDIR)
else:
    os.makedirs(TMPDIR)

# verify CVSROOT
root=root.rstrip(os.sep) #strip any extra "/" at the end
if root and not os.path.isdir(root): sys.exit("cvslog2web: bad CVSROOT: "+root)

# Pull the cvslog2web script's version number out of the CVS keyword replacement
SCRIPT_VERSION=re.findall("[0-9.]+",SCRIPT_REVISION)
if len(SCRIPT_VERSION)==0:
    sys.exit("cvslog2web: invalid SCRIPT_REVISION setting (no version number) "+SCRIPT_REVISION)
elif len(SCRIPT_VERSION)>1:
    print "WARNING cvslog2web SCRIPT_REVISION contains multiple version strings?", SCRIPT_REVISION
SCRIPT_VERSION=SCRIPT_VERSION[0]

# First stdin line from CVS loginfo is "Update of <absolute commit directory>";
# in rebuild mode (fewer than 3 args) there is no stdin to read
if len(sys.argv)<3:
    status=os.sep
else:
    status=sys.stdin.readline()[:-1]
    if not status.startswith("Update of "): sys.exit("cvslog2web: unrecognized cvs output")
    status=status[len("Update of "):]

if not status.startswith(root+os.sep): sys.exit("cvslog2web: commit outside repository?")
cidir=status[len(root)+1:] #don't leave intro '/' on cidir

# first word(s) of args form the directory, pop until they match cidir
# (directory names containing spaces arrive as multiple args)
cipop=""
while cipop!=cidir:
    if len(args)==0: sys.exit("cvslog2web: Unable to parse cvs output")
    cipop=os.path.join(cipop,args.pop(0))
del cipop

# test to see if this is the result of an import vs. regular commit
if len(sys.argv)>=3 and sys.argv[2].endswith(" - Imported sources"):
    # NOTE(review): args is a list but is sliced by len of the string
    # " - Imported sources" (19 elements, not 3 words) -- verify intent
    files=args[:-len(" - Imported sources")]
    imported=True
else:
    # args (set from command line) is a string with a series of filename,oldvers,newvers values
    # this regular expression parses the string into a list of tuples
    # This RE is smart enough to handle filenames with spaces or commas in them!
    files=re.findall("(.*?),([0-9.]+|NONE),([0-9.]+|NONE) "," ".join(args)+" ")
    imported=False
# This function is used to convert a dotted decimal version string to a version list: "1.2.3" -> [1,2,3]
# This form is much more applicable to comparison -- lexicographic comparison
# of the original version string would order "1.10" before "1.2"
def version2list(s):
    """Convert a dotted version string to a list of ints; "NONE" becomes []."""
    if s=="NONE": return []
    # explicit list comprehension instead of map(): guarantees a concrete list
    # (map() returns a lazy iterator on Python 3), which the later min/max
    # version comparisons rely on
    return [int(part) for part in s.split(".")]
# Now this next bit converts files into a dictionary mapping of
# names to [oldversion,newversion] lists, using the list form of version numbers
# (we're going to add on to the list of values for each file)
files=dict([(x[0], map(version2list,x[1:])) for x in files])

# Enough of the command line arguments, now parse the stdin message
# We still need to get the status flag (add/modify/remove) for each file
# First we need to get the list of file names in each section
# Each of these sections at this point is just a space-delimited list of file names
added=[]; modified=[]; removed=[]; message=[]; importedFiles=[]; importedTag=""
if len(sys.argv)>=3:
    line=sys.stdin.readline()
    section=[] #empty initial section -- discard lines until a recognized header
    while line:
        if section is message:
            # the log message runs to EOF, except an import's trailing "Status:" block
            if imported and line=="Status:\n":
                message=["".join(message).strip()+"\n"]
                section=[]
            else:
                section.append(line)
        elif line=="Log Message:\n":
            section=message
        elif line=="Added Files:\n":
            section=added
        elif line=="Modified Files:\n":
            section=modified
        elif line=="Removed Files:\n":
            section=removed
        elif line.strip().startswith("Tag:"):
            pass #branch tag, currently no-op... eventually it would be nice to store and track this
        elif imported and (line.startswith("Vendor Tag:") or line.startswith("Release Tags:")):
            message.append(line)
            section=importedFiles
            if line.startswith("Release Tags:"):
                importedTag=line[len("Release Tags:"):].strip()
        elif section is importedFiles:
            # import file lines look like "N path" where N is a status letter
            m=re.findall("([A-Z]) (.*)",line[:-1])
            if len(m)==1:
                importedFiles.append(m[0])
        else:
            section.append(line[1:-1]) #strip initial tab and trailing newline/linefeed
        line=sys.stdin.readline()
# collapse each section's accumulated lines into a single string
added="".join(added)
modified="".join(modified)
removed="".join(removed)
message="".join(message).strip()
# sort imported (letter,path) tuples by path
importedFiles.sort(lambda x,y: cmp(x[1],y[1]))

# Don't do anything with new directories (everyone runs with 'update -dP' anyway right?)
# Directories don't matter until there's something in them
if len(sys.argv)>=3 and sys.argv[2].endswith(" - New directory"):
    # waited this long because CVS throws a hissy fit ("broken pipe...")
    # if you don't read the log message before quitting
    sys.exit()

# Constants for symbolic reference to information
ADDED="add"
MODIFIED="mod"
REMOVED="del"
GHOST="ghost" # this comes up later... we won't get this directly in the input

# We have the file names from the command line in 'files' (parsed above).
# Now we need to see which files are in which status section.
# A bit tricky because each space could be separating files
# or could be part of a filename itself.
# Spaces are such a pain, especially when they are being used
# as the delimiter, and your input doesn't escape the "real" spaces! Grrr.
# *** Still not perfect... if "foo" and "foo bar" are both
# involved, this could confuse it. ***
#
# Tags each filename from space-delimited list 'l' with 'tag' in the global
# 'files' dict, and records full-path -> (oldv,newv,tag) tuples into 'out'.
# Unmatched words are accumulated into 'partial' in case they are fragments
# of a filename containing spaces.
def processFiles(l,tag,out):
    partial=""
    for f in l.split(" "):
        f=partial+f
        if f in files:
            files[f].append(tag)
            out[os.path.join(cidir,f)]=tuple(files[f])
            partial=""
        else:
            partial=f+" "
    if partial.strip():
        print "WARNING: partial filename in", tag, "section: '"+partial.strip()+"'"
paths={}
processFiles(added,ADDED,paths)
processFiles(modified,MODIFIED,paths)
processFiles(removed,REMOVED,paths)

# now paths is a dictionary mapping full path to (oldv,newv,status) tuples

# We've got our input regarding the current log entry,
# need to compare that against previous entry and see if it's
# all part of the same commit (since this script will be called
# separately for each directory involved in the commit, but
# we want them all to be associated in the same log entry)
# load a pickled status file from TMPDIR, with a default value if unavailable
def readfile(name,default):
    """Unpickle TMPDIR/name and return its contents, or 'default' when the file cannot be opened."""
    path=os.path.join(TMPDIR,name)
    try:
        handle=open(path,"rb")
    except:
        # no saved state yet (e.g. first run) -- fall back quietly
        return default
    contents=pickle.load(handle)
    handle.close()
    return contents

# lasttime is the datetime object from the last call to the script
# (default pretends the previous run was exactly TIMEOUT ago, i.e. expired)
lasttime=readfile("lasttime",curtime-datetime.timedelta(0,TIMEOUT))

# history is everything we know about each entry, up to MAXHISTORY long
# The format is described in the next several lines
history=readfile("history",[])

# These enumerations define the basic fields in each entry in history
PATH,DATETIME,TIMESTAMP,AUTHORS,MESSAGE,IMPORTEDFILES,IMPORTEDTAG=range(7)

### History format ###
# History is a pretty major, and somewhat complex structure.
# It's basically just a list of entries, where each entry is a list indexed
# by the enumerations listed above, defining basic format of the log entries
# PATH element holds the 'paths' variable (dictionary mapping paths to version numbers and status flag)
# DATETIME is the datetime object of the initial commit
# TIMESTAMP is the last updated timestamp (formatted string)
# AUTHORS is a dictionary mapping usernames to (name,email) tuples (both are strings)
# MESSAGE is a string holding the log message read from stdin
######################

# get the user's name and username
# This might be a unix-only feature, not the end of the world if you
# have to rely on the contact list in the configuration section, or
# substitute another method
pw_db=pwd.getpwuid(os.getuid())
user=pw_db[0] # pw_name: the "short" name, e.g. 'ejt'
user_name=pw_db[4] # pw_gecos: the "full" name, e.g. 'Ethan Tira-Thompson'

# If within the TIMEOUT and have the same message, merge
# the current file list with the first entry of the history
# Otherwise, add a new entry (popping old entries if len(history)>MAXHISTORY...)
droppedHistory=[] # stores popped entries, reused when rebuilding permalinks
if len(sys.argv)<3:
    pass # rebuild shouldn't change any history entries
elif curtime-lasttime<datetime.timedelta(0,TIMEOUT) and len(history)>0 and history[0][MESSAGE]==message and not imported:
    # merge is a little interesting:
    # If the file is added and then modified with the same log message,
    # merge as still added, just with the later revision number
    # Removed and re-added is modified
    # Similarly, modified then removed is just removed
    # However, added then removed is as if never existed, but still need
    # to store file info, (in case of re-add) hence the "ghost" status
    # v1/v2 are (oldversion,newversion,status) tuples; returns the merged tuple
    def merge(v1,v2):
        ov=min(v1[0],v2[0])
        nv=max(v1[1],v2[1])
        if v1[2]==v2[2]:
            t=v1[2]
        elif v1[2]==ADDED and v2[2]==REMOVED:
            t=GHOST
        elif v1[2]==REMOVED and v2[2]==ADDED:
            t=MODIFIED
        elif v1[2]==ADDED or v2[2]==ADDED:
            t=ADDED
        elif v1[2]==REMOVED or v2[2]==REMOVED:
            t=REMOVED
        else:
            t=GHOST
        return (ov,nv,t)

    for k,v in paths.iteritems():
        # setdefault inserts new paths; a differing existing value means merge
        if history[0][PATH].setdefault(k,v)!=v:
            history[0][PATH][k]=merge(history[0][PATH][k],v)
    history[0][TIMESTAMP]=curtime_str
    history[0][AUTHORS][user]=CONTACT.get(user,(user_name,user+"@"+DOMAIN))

else: # push paths as a new entry on its own
    # NOTE(review): this fallback uses (user, ...) where the merge branch above
    # uses (user_name, ...) -- possibly an inconsistency, confirm which is intended
    authors={user:CONTACT.get(user,(user,user+"@"+DOMAIN))}
    history.insert(0,[paths,curtime,curtime_str,authors,message,importedFiles,importedTag])
while len(history)>MAXHISTORY: droppedHistory.append(history.pop())


################################################################################
################ STATUS STORAGE ################
################################################################################
# We want to write out the history as soon as possible to reduce the risk
# of a processing error causing us to drop or corrupt an entry. At least
# once it's stored in the file, if there's an error I can debug it and then
# we can regenerate the output.
if len(sys.argv)>=3: # don't touch files if it's a rebuild
    f=open(os.path.join(TMPDIR,"lasttime"),"wb")
    pickle.dump(curtime,f)
    f.close()
    f=open(os.path.join(TMPDIR,"history"),"wb")
    pickle.dump(history,f)
    f.close()


################################################################################
################ FUNCTION DECLARATIONS ################
################################################################################

# shorthand for adding a subnode with a particular name and textual content
def appendTextualTag(node,name,text):
    """Append a child element 'name' containing 'text' to 'node'; returns the new element.

    Also appends a newline text node to 'node' to keep the XML output readable.
    Uses the module-global 'doc' document as the element factory.
    """
    # renamed third parameter from 'str' (shadowed the builtin); all call
    # sites in this file pass it positionally
    n=node.appendChild(doc.createElement(name))
    n.appendChild(doc.createTextNode(text))
    node.appendChild(doc.createTextNode("\n"))
    return n

# quick-n-dirty version of set(), when used with reduce()
def collect(x,y):
    """Append y to list x unless already present; returns x (reduce-style accumulator)."""
    if y in x:
        return x
    x.append(y)
    return x

# removes first DROP_MODULE directory names from path
def dropModules(path):
    """Strip the first DROP_MODULE leading directory components from path.

    Returns "" if the path has fewer components than DROP_MODULE.
    """
    remaining=DROP_MODULE
    while remaining>0:
        cut=path.find(os.sep)
        if cut==-1:
            return ""
        path=path[cut+1:]
        remaining-=1
    return path

# Converts a list of file names to a more compact form, grouping
# those in directories together:
# /foo/bar and /foo/baz become /foo/{bar,baz}
def makeStr(names):
    # unique directory names, deepest (longest) first so files attach to
    # their most specific shared directory
    dirs=reduce(collect,[os.path.dirname(x) for x in names],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    common={} # directory -> list of member file names (relative to that directory)
    remain=names[:]
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        paths.sort()
        if len(paths)>1:
            for p in paths:
                common.setdefault(dropModules(d),[]).append(p[len(d)+1:])
                remain.remove(p)
    # singletons keep their full (module-stripped) path
    for p in remain:
        common.setdefault(dropModules(os.path.dirname(p)),[]).append(dropModules(p))
    ks=common.keys()
    ks.sort()
    strs=[]
    for k in ks:
        v=common[k]
        if len(v)>1:
            if k:
                strs.append(os.path.join(k,"{"+(",".join(v))+"}"))
            else:
                strs.append(", ".join(v))
        else:
            strs.append(v[0])
    return ", ".join(strs)
564 | |
565 | # This older version is a bit more simplistic, but doesn't |
566 | # handle singleton paths as nicely |
567 | #def makeStr(names): |
568 | # dirs=reduce(collect,[os.path.dirname(x) for x in names],[]) |
569 | # strs=[] |
570 | # for d in dirs: |
571 | # files=map(os.path.basename,filter(lambda x: x.startswith(d),names)) |
572 | # if len(files)==1: |
573 | # strs.append(os.path.join(d,files)) |
574 | # else: |
575 | # strs.append(os.path.join(d,"{"+",".join(files)+"}")) |
576 | # return " ".join(strs) |

# convert version list to string representation
# [major, minor, patch, ...] -> major.minor.patch....
def vers2str(v):
    """Render a version list such as [1,2,3] as the dotted string "1.2.3"."""
    return ".".join([str(component) for component in v])


# this part computes titles and links for each entry in the history
# histFiles[i] is a one-line "Added ...; Modified ...; Removed ..." summary
# for history[i]
histFiles=[]
for entry in history:
    # generate title string
    addstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
    modstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
    remstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
    files=[]
    if len(addstr)>0:
        files.append("Added "+addstr)
    if len(modstr)>0:
        files.append("Modified "+modstr)
    if len(remstr)>0:
        files.append("Removed "+remstr)
    files="; ".join(files)
    histFiles.append(files)

# Links are a bit tricky; this will generate a dictionary of
# "common paths", where each key is the path, value is a list of
# files (may include singleton paths) within that path
# Each file is a dict with path/filename/oldversion/newversion fields
# suitable for the DIFFLINKFORMAT/VIEWLINKFORMAT templates.
# NOTE(review): reads the loop variable 'entry' as a global -- only valid when
# called from inside the history loops below; verify if refactoring.
def makeLinks(paths):
    dirs=reduce(collect,[os.path.dirname(x) for x in paths],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    links={}
    remain=paths
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        if len(paths)>1:
            paths.sort()
            for p in paths:
                info=entry[PATH][p]
                links.setdefault(dropModules(d),[]).append(dict(path=p,filename=p[len(d)+1:],oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
                remain.remove(p)
    for p in remain:
        info=entry[PATH][p]
        links.setdefault(dropModules(os.path.dirname(p)),[]).append(dict(path=p,filename=dropModules(p),oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
    return links

# histLinks[i] maps a section label ("Added: "/"Modified: "/"Removed: " or
# "Diff: ") to the makeLinks() dictionary for history[i]
histLinks=[]
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE:
    # file list is shown in the body, so split links by status section
    for entry in history:
        addlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
        modlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
        remlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
        links={}
        if len(addlinks)>0:
            links["Added: "]=addlinks
        if len(modlinks)>0:
            links["Modified: "]=modlinks
        if len(remlinks)>0:
            links["Removed: "]=remlinks
        histLinks.append(links)
else:
    # file list is the title, so a single combined diff-link section suffices
    for entry in history:
        histLinks.append({"Diff: ": makeLinks(entry[PATH].keys())})
639 | |
def genPermalink(t):
    """Return the public URL of the permalink page for a commit at time t."""
    # PERMALINK_STRUCTURE is a strftime pattern; %(us) carries microseconds
    relative=t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}
    return PERMALINK_URL_PREFIX+relative
643 | |
def genPermafile(t):
    """Return the filesystem path of the permalink page for a commit at time t."""
    relative=t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}
    # the structure uses URL-style "/" separators; convert for this platform
    return os.path.join(PERMALINKDIR,relative.replace("/",os.sep))
647 | |
648 | |
649 | ################################################################################ |
650 | ################ RSS (ATOM) OUTPUT ################ |
651 | ################################################################################ |
652 | |
# Emit the Atom feed: a <feed> header followed by one <entry> per commit
# (newest first, capped at FEED_MAXHISTORY), each carrying the commit
# message as XHTML content plus diff/view links for every touched file.
if FEEDOUT:
    dom=xml.dom.getDOMImplementation()
    doc=dom.createDocument("http://www.w3.org/2005/Atom","feed",None)
    feed=doc.documentElement
    # stylesheet PI lets browsers render the raw feed; Atom.css ships alongside
    doc.insertBefore(doc.createProcessingInstruction("xml-stylesheet",'href="Atom.css" type="text/css"'),feed)
    feed.setAttribute("xmlns","http://www.w3.org/2005/Atom")
    feed.appendChild(doc.createTextNode("\n"))

    # feed header tags
    appendTextualTag(feed,"id",FEEDID)
    appendTextualTag(feed,"title",FEEDTITLE)
    if FEEDHOMELINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","alternate")
        linknode.setAttribute("href",FEEDHOMELINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDSELFLINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","self")
        linknode.setAttribute("href",FEEDSELFLINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDLOGO:
        appendTextualTag(feed,"logo",FEEDLOGO)
    if FEEDICON:
        appendTextualTag(feed,"icon",FEEDICON)
    if FEEDDESCRIPTION:
        appendTextualTag(feed,"subtitle",FEEDDESCRIPTION)
    appendTextualTag(feed,"updated",curtime_str)
    generator=feed.appendChild(doc.createElement("generator"))
    generator.setAttribute("uri",SCRIPT_URL)
    generator.setAttribute("version",SCRIPT_VERSION)
    generator.appendChild(doc.createTextNode("cvslog2web"))
    feed.appendChild(doc.createTextNode("\n"))

    # history/histFiles/histLinks are parallel arrays; slice caps feed length
    for (entry,files,links) in zip(history,histFiles,histLinks)[:FEED_MAXHISTORY]:
        msg=entry[MESSAGE].splitlines()

        # entry header tags
        node=feed.appendChild(doc.createElement("entry"))
        node.appendChild(doc.createTextNode("\n"))
        t=entry[DATETIME]
        # tag: URI scheme id -- date plus second-of-day and microseconds keeps it unique
        appendTextualTag(node,"id",entry[DATETIME].strftime("tag:%%s,%Y-%m-%d:%%05d.%%06d")%(DOMAIN,t.hour*60*60+t.minute*60+t.second,t.microsecond))
        if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
            appendTextualTag(node,"title",FEEDENTRYPREFIX+msg[0])
        elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
            appendTextualTag(node,"title",FEEDENTRYPREFIX+files)
        else:
            sys.exit("cvslog2web: bad ENTRYTITLE setting")
        appendTextualTag(node,"updated",entry[TIMESTAMP])
        # n is (name,) or (name,email); email element only when non-empty
        for n in entry[AUTHORS].itervalues():
            author=node.appendChild(doc.createElement("author"))
            author.appendChild(doc.createElement("name")).appendChild(doc.createTextNode(n[0]))
            if len(n)>1 and n[1]!="":
                author.appendChild(doc.createElement("email")).appendChild(doc.createTextNode(n[1]))
            node.appendChild(doc.createTextNode("\n"))
        if PERMALINKDIR:
            linknode=node.appendChild(doc.createElement("link"))
            linknode.setAttribute("rel","alternate")
            linknode.setAttribute("href",genPermalink(entry[DATETIME]))
            linknode.setAttribute("type","text/html")
        if FEEDRELATED:
            # one rel="related" link per file; diff link needs both revisions,
            # otherwise fall back to a plain view link (added/removed files)
            for ll in links.itervalues():
                for group in ll.itervalues():
                    for l in group:
                        linknode=node.appendChild(doc.createElement("link"))
                        linknode.setAttribute("rel","related")
                        linknode.setAttribute("title",l["path"])
                        if l["oldversion"] and l["newversion"]:
                            linknode.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                        else:
                            linknode.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                        linknode.setAttribute("type","text/html")

        # CONTENT section
        content=node.appendChild(doc.createElement("content"))
        content.setAttribute("type","xhtml")
        content=content.appendChild(doc.createElement("div"))
        content.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
        content.appendChild(doc.createTextNode("\n"))
        for i,m in enumerate(msg):
            # skip the first message line when it already served as the title
            if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
                continue
            appendTextualTag(content,"div",m).setAttribute("style","padding:.25em 0;")
        # End of message embedding

        #now add links to content
        filelist=content.appendChild(doc.createElement("div"))
        if len(msg)>1:
            filelist.setAttribute("style","padding:.6em 0;")
        for pre,ll in links.iteritems():
            diffs=filelist.appendChild(doc.createElement("div"))
            diffs.setAttribute("style","padding:.25em 0;")
            diffs.appendChild(doc.createTextNode(pre))
            diffs=diffs.appendChild(doc.createElement("tt"))
            firstSet=True
            for k,group in ll.iteritems():
                if not firstSet:
                    diffs.appendChild(doc.createTextNode(", "))
                # several files under common dir k render as "k/{a, b, c}"
                if len(group)>1 and k:
                    diffs.appendChild(doc.createTextNode(os.path.join(k,"{")))
                firstLink=True
                for l in group:
                    if not firstLink:
                        diffs.appendChild(doc.createTextNode(", "))
                    a=diffs.appendChild(doc.createElement("a"))
                    if l["oldversion"] and l["newversion"]:
                        a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                    else:
                        a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                    a.appendChild(doc.createTextNode(l["filename"]))
                    firstLink=False
                if len(group)>1 and k:
                    diffs.appendChild(doc.createTextNode("}"))
                firstSet=False
            filelist.appendChild(doc.createTextNode("\n"))

        #end of content
        node.appendChild(doc.createTextNode("\n"))

    f=open(FEEDOUT,"wb")
    doc.writexml(f)
    f.close()
    doc.unlink()
    if VERBOSE:
        print "Feed update successful"
778 | |
779 | |
780 | ################################################################################ |
781 | ################ HTML OUTPUT ################ |
782 | ################################################################################ |
783 | |
784 | # some functions for HTML output, shared by HTMLOUT and PERMALINK output |
def HTMLHeader(dom,title,css,rootclass):
    """Build an XHTML 1.0 Transitional document skeleton.

    dom       -- DOM implementation used to create the document
    title     -- page <title> text
    css       -- stylesheet href, or falsy to omit the <link>
    rootclass -- class attribute for the content <div> inside <body>
    Returns (document, content_div); callers append entries to content_div.
    Also advertises the Atom feed in <head> when the feed is configured.
    """
    dt=dom.createDocumentType("html","//W3C//DTD XHTML 1.0 Transitional//EN","http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd")
    document=dom.createDocument("http://www.w3.org/1999/xhtml","html",dt)
    root=document.documentElement
    root.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
    root.appendChild(document.createTextNode("\n"))
    head=root.appendChild(document.createElement("head"))
    appendTextualTag(head,"title",title)
    if css:
        stylelink=head.appendChild(document.createElement("link"))
        stylelink.setAttribute("rel","stylesheet")
        stylelink.setAttribute("type","text/css")
        stylelink.setAttribute("href",css)
        head.appendChild(document.createTextNode("\n"))
    if FEEDOUT and FEEDSELFLINK:
        feedlink=head.appendChild(document.createElement("link"))
        feedlink.setAttribute("rel","alternate")
        feedlink.setAttribute("title","Atom Syndication Feed")
        feedlink.setAttribute("type","application/atom+xml")
        feedlink.setAttribute("href",FEEDSELFLINK)
        head.appendChild(document.createTextNode("\n"))
    body=root.appendChild(document.createElement("body"))
    body.appendChild(document.createTextNode("\n"))
    container=body.appendChild(document.createElement("div"))
    body.appendChild(document.createTextNode("\n"))
    container.appendChild(document.createTextNode("\n"))
    container.setAttribute("class",rootclass)
    return (document,container)
813 | |
def appendLinks(filelist,pre,ll):
    """Append a <p> of status-labeled file links to the given element.

    filelist -- parent DOM element that receives the new paragraph
    pre      -- status caption text (e.g. "Added: ") shown before the links
    ll       -- dict of common-directory -> list of link dicts (keys path,
                filename, oldversion, newversion) as built by makeLinks()

    NOTE(review): relies on the module-global 'doc' being the document under
    construction; only the HTMLOUT section calls this, which defines 'doc'.
    """
    # rebind to the new <p> so everything below attaches inside it
    filelist=filelist.appendChild(doc.createElement("p"))
    appendTextualTag(filelist,"span",pre).setAttribute("class","cvslog2web_filestatus")
    firstSet=True
    for k,v in ll.iteritems():
        if not firstSet:
            filelist.appendChild(doc.createTextNode(", "))
        # several files under common dir k render as "k/{a, b, c}"
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode(os.path.join(k,"{")))
        firstLink=True
        for l in v:
            if not firstLink:
                filelist.appendChild(doc.createTextNode(", "))
            a=filelist.appendChild(doc.createElement("a"))
            if HTMLOUT_TARGET:
                a.setAttribute("target",HTMLOUT_TARGET)
            # diff link needs both revisions; otherwise plain view link
            if l["oldversion"] and l["newversion"]:
                a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
            else:
                a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
            a.appendChild(doc.createTextNode(l["filename"]))
            firstLink=False
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode("}"))
        firstSet=False
839 | |
840 | |
841 | ################ HTML STANDALONE OUTPUT ################ |
842 | |
# Emit the HTML index: either a full standalone page (via HTMLHeader) or a
# bare <div> fragment for server-side inclusion; one entry div per commit,
# sections ordered per HTMLOUT_ORDER.
if HTMLOUT:
    dom=xml.dom.getDOMImplementation()
    if not HTMLOUT_STANDALONE:
        # embeddable fragment; minidom implementations may lack
        # createDocumentFragment, so fall back to an empty document
        if "createDocumentFragment" in dir(dom):
            doc=dom.createDocumentFragment()
        else:
            doc=dom.createDocument(None,None,None)
        rootdiv=doc.appendChild(doc.createElement("div"))
        rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
        rootdiv.appendChild(doc.createTextNode("\n"))
        rootdiv.setAttribute("class","cvslog2web_index")
    else:
        doc,rootdiv=HTMLHeader(dom,HTMLTITLE,HTMLOUT_CSS,"cvslog2web_index")

    # history/histFiles/histLinks are parallel arrays; slice caps page length
    for (entry,files,links) in zip(history,histFiles,histLinks)[:HTMLOUT_MAXHISTORY]:
        msg=entry[MESSAGE].splitlines()

        # entry header tags
        node=rootdiv.appendChild(doc.createElement("div"))
        node.setAttribute("class",HTMLOUT_OUTERCLASS)
        node.appendChild(doc.createTextNode("\n"))

        for section in HTMLOUT_ORDER:
            if section==HO_TITLE:
                # Title section -- the title doubles as the permalink anchor
                # when permalinks are enabled
                titlenode=node.appendChild(doc.createElement("div"))
                titlenode.setAttribute("class","cvslog2web_title")
                if PERMALINKDIR:
                    a=appendTextualTag(titlenode,"a",HTMLOUT_ENTRYPREFIX)
                    if HTMLOUT_TARGET:
                        a.setAttribute("target",HTMLOUT_TARGET)
                    a.setAttribute("href",genPermalink(entry[DATETIME]))
                    if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(a,"span",msg[0]).setAttribute("class","cvslog2web_message")
                    elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(a,"span",files).setAttribute("class","cvslog2web_filelist")
                    else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
                else:
                    titlenode.appendChild(doc.createTextNode(HTMLOUT_ENTRYPREFIX))
                    if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(titlenode,"span",msg[0]).setAttribute("class","cvslog2web_message")
                    elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(titlenode,"span",files).setAttribute("class","cvslog2web_filelist")
                    else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
                titlenode.appendChild(doc.createTextNode(" "))

            elif section==HO_AUTHORS:
                # Authors section
                # NOTE(review): uses 'titlenode' from the HO_TITLE branch, so
                # HTMLOUT_ORDER must place HO_TITLE before HO_AUTHORS -- confirm
                authors=titlenode.appendChild(doc.createElement("span"))
                authors.setAttribute("class","cvslog2web_authors")
                if HTMLOUT_AUTHORSPREFIX:
                    authorsprefix=authors.appendChild(doc.createElement("span"))
                    authorsprefix.setAttribute("class","cvslog2web_authorsprefix")
                    authorsprefix.appendChild(doc.createTextNode(HTMLOUT_AUTHORSPREFIX))
                    authors.appendChild(doc.createTextNode(",".join([k for k,n in entry[AUTHORS].iteritems()])))
                else:
                    # no prefix configured: wrap the author list in parentheses
                    authors.appendChild(doc.createTextNode("("+",".join([k for k,n in entry[AUTHORS].iteritems()])+")"))

            elif section==HO_TIMESTAMP:
                # Commit date and time -- shift stored time into local zone
                if time.daylight:
                    t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
                    tz=time.tzname[1]
                else:
                    t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
                    tz=time.tzname[0]
                timestampstr=t.strftime(HTMLOUT_TIMESTAMPFORMAT+" "+tz)
                timestamp=node.appendChild(doc.createElement("div"))
                timestamp.setAttribute("class","cvslog2web_timestamp")
                if HTMLOUT_TIMESTAMPPREFIX:
                    timestampprefix=timestamp.appendChild(doc.createElement("span"))
                    timestampprefix.setAttribute("class","cvslog2web_timestampprefix")
                    timestampprefix.appendChild(doc.createTextNode(HTMLOUT_TIMESTAMPPREFIX))
                timestamp.appendChild(doc.createTextNode(timestampstr))

            elif section==HO_MESSAGE:
                # Content section
                if HTMLOUT_MESSAGE:
                    content=node.appendChild(doc.createElement("div"))
                    content.setAttribute("class","cvslog2web_message")
                    content.appendChild(doc.createTextNode("\n"))
                    for i,m in enumerate(msg):
                        # skip first line when it already served as the title
                        if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
                            continue
                        appendTextualTag(content,"p",m)
                    # drop the div entirely if it would have been empty
                    if len(msg)==1 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE):
                        node.removeChild(content)

            elif section==HO_FILELIST:
                # redundant if the title is already the file list
                if len(links)>0 and HTMLOUT_FILELIST and ENTRYTITLE!=TITLE_FILE_LIST:
                    #now add links to content
                    filelist=node.appendChild(doc.createElement("div"))
                    filelist.setAttribute("class","cvslog2web_filelist")
                    filelist.appendChild(doc.createTextNode("\n"))
                    for pre,l in links.iteritems():
                        appendLinks(filelist,pre,l)

        #end of content
        node.appendChild(doc.createTextNode("\n"))

    # credit footer linking back to the script's homepage
    gen=rootdiv.appendChild(doc.createElement("div"))
    gen.setAttribute("class","cvslog2web_credit")
    gen.appendChild(doc.createTextNode("Generated by "))
    a=appendTextualTag(gen,"a","cvslog2web")
    a.setAttribute("target","_top")
    a.setAttribute("href",SCRIPT_URL)
    gen.appendChild(doc.createTextNode(SCRIPT_VERSION))

    f=open(HTMLOUT,"wb")
    if HTMLOUT_STANDALONE or "createDocumentFragment" in dir(dom):
        doc.writexml(f)
    else:
        # createDocumentFragment unavailable, hack it and strip the xml processing instruction
        s=doc.toxml()
        s=s[s.find("\n")+1:]
        f.write(s)
    f.close()
    doc.unlink()
    if VERBOSE:
        print "HTML update successful"
959 | |
960 | |
961 | ################ HTML PERMALINK OUTPUT ################ |
962 | |
def normalizeLink(link):
    """Make a site-relative link usable from within a permalink page.

    With no PERMALINK_URL_PREFIX configured, permalink pages live several
    directories deep (one per "/" in PERMALINK_STRUCTURE), so the link must
    climb back up with "../" segments; otherwise it is returned unchanged.
    """
    if PERMALINK_URL_PREFIX:
        return link
    depth=PERMALINK_STRUCTURE.count("/")
    return "../"*depth+link
967 | |
def permalinkStatusIcon(node,index,alt):
    """Append a file-status icon <img> to node.

    node  -- DOM element receiving the image (must belong to a document)
    index -- index into PERMALINK_STATUSICONS (0=added, 1=modified, 2=removed)
    alt   -- alt text shown when the icon is unavailable, e.g. "A"/"M"/"R"
    """
    # Create the element from node's own document instead of the module-global
    # 'doc': that global is only bound by the feed/HTML sections, so relying on
    # it raised NameError (or used a stale, already-serialized document) when
    # permalinks were generated with feed and HTML output disabled.
    i=node.ownerDocument.createElement("img")
    node.appendChild(i)
    i.setAttribute("src",normalizeLink(PERMALINK_STATUSICONS[index]))
    i.setAttribute("alt",alt)
972 | |
973 | def writePermalink(entry,files,links,prevLink="",nextLink=""): |
974 | if prevLink: prevLink=normalizeLink(prevLink) |
975 | if nextLink: nextLink=normalizeLink(nextLink) |
976 | |
977 | permalink=genPermalink(entry[DATETIME]) |
978 | permafile=genPermafile(entry[DATETIME]) |
979 | permdir=os.path.dirname(permafile) |
980 | if os.path.exists(permdir): |
981 | if not os.path.isdir(permdir): sys.exit("cvslog2web: file blocking PERMALINKDIR "+permdir) |
982 | else: |
983 | os.makedirs(permdir) |
984 | |
985 | msg=entry[MESSAGE].splitlines() |
986 | |
987 | # entry header tags |
988 | if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: |
989 | title=FEEDENTRYPREFIX+msg[0] |
990 | elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: |
991 | title=FEEDENTRYPREFIX+files |
992 | else: |
993 | sys.exit("cvslog2web: bad ENTRYTITLE setting") |
994 | |
995 | dom=xml.dom.getDOMImplementation() |
996 | if not PERMALINK_STANDALONE: |
997 | if "createDocumentFragment" in dir(dom): |
998 | doc=dom.createDocumentFragment() |
999 | else: |
1000 | doc=dom.createDocument(None,None,None) |
1001 | rootdiv=doc.appendChild(doc.createElement("div")) |
1002 | rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml") |
1003 | rootdiv.appendChild(doc.createTextNode("\n")) |
1004 | rootdiv.setAttribute("class","cvslog2web_permalink") |
1005 | else: |
1006 | doc,rootdiv=HTMLHeader(dom,title,PERMALINK_CSS,"cvslog2web_permalink") |
1007 | |
1008 | node=rootdiv.appendChild(doc.createElement("div")) |
1009 | node.setAttribute("class",PERMALINK_OUTERCLASS) |
1010 | node.appendChild(doc.createTextNode("\n")) |
1011 | |
1012 | for section in PERMALINK_ORDER: |
1013 | # Previous link |
1014 | if section==PL_PREVLINK: |
1015 | n=node.appendChild(doc.createElement("div")) |
1016 | if prevLink: |
1017 | n.setAttribute("class","cvslog2web_nav_prev") |
1018 | if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+prevLink+"'") |
1019 | a=n.appendChild(doc.createElement("a")) |
1020 | a.setAttribute("href",prevLink) |
1021 | a.appendChild(doc.createTextNode(PL_PREVTEXT)) |
1022 | else: |
1023 | n.setAttribute("class","cvslog2web_nav_prev_disabled") |
1024 | n.appendChild(doc.createTextNode(PL_PREVTEXT)) |
1025 | |
1026 | # Previous link |
1027 | elif section==PL_NEXTLINK: |
1028 | n=node.appendChild(doc.createElement("div")) |
1029 | if nextLink: |
1030 | n.setAttribute("class","cvslog2web_nav_next") |
1031 | if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+nextLink+"'") |
1032 | a=n.appendChild(doc.createElement("a")) |
1033 | a.setAttribute("href",nextLink) |
1034 | a.appendChild(doc.createTextNode(PL_NEXTTEXT)) |
1035 | else: |
1036 | n.setAttribute("class","cvslog2web_nav_next_disabled") |
1037 | n.appendChild(doc.createTextNode(PL_NEXTTEXT)) |
1038 | |
1039 | # Feed link |
1040 | elif section==PL_FEEDLINK and FEEDOUT and FEEDSELFLINK: |
1041 | n=node.appendChild(doc.createElement("div")) |
1042 | n.setAttribute("class","cvslog2web_feedlink") |
1043 | a=n.appendChild(doc.createElement("a")) |
1044 | a.setAttribute("href",FEEDSELFLINK) |
1045 | i=a.appendChild(doc.createElement("img")) |
1046 | i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"atom_feed.png")) |
1047 | i.setAttribute("width","84") |
1048 | i.setAttribute("height","15") |
1049 | i.setAttribute("alt","Atom Badge") |
1050 | a.appendChild(doc.createTextNode(" ")) |
1051 | i=a.appendChild(doc.createElement("img")) |
1052 | i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"feed_icon.png")) |
1053 | i.setAttribute("width","15") |
1054 | i.setAttribute("height","15") |
1055 | i.setAttribute("alt","Feed Icon") |
1056 | |
1057 | # Title (Timestamp) |
1058 | elif section==PL_TIMESTAMP: |
1059 | if time.daylight: |
1060 | t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone) |
1061 | tz=time.tzname[1] |
1062 | else: |
1063 | t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone) |
1064 | tz=time.tzname[0] |
1065 | title=t.strftime("Commited %a. %B %d, %Y at %I:%M:%S %p "+tz) |
1066 | appendTextualTag(node,"div",title).setAttribute("class","cvslog2web_timestamp") |
1067 | |
1068 | # Authors |
1069 | elif section==PL_AUTHORS: |
1070 | authors=node.appendChild(doc.createElement("div")) |
1071 | authors.setAttribute("class","cvslog2web_authors") |
1072 | authors.appendChild(doc.createTextNode("\nfrom ")) |
1073 | first=True |
1074 | for n in entry[AUTHORS].itervalues(): |
1075 | if first: first=False |
1076 | else: authors.appendChild(doc.createTextNode(", ")) |
1077 | l=appendTextualTag(authors,"a",n[0]) |
1078 | l.setAttribute("href","mailto:"+n[1]) |
1079 | |
1080 | # Message |
1081 | elif section==PL_MESSAGE: |
1082 | content=node.appendChild(doc.createElement("div")) |
1083 | content.setAttribute("class","cvslog2web_message") |
1084 | for i,m in enumerate(msg): |
1085 | content.appendChild(doc.createTextNode("\n"+m)) |
1086 | content.appendChild(doc.createElement("br")) |
1087 | content.appendChild(doc.createTextNode("\n")) |
1088 | |
1089 | # Links |
1090 | elif section==PL_FILELIST: |
1091 | links=node.appendChild(doc.createElement("div")) |
1092 | links.setAttribute("class","cvslog2web_filelist") |
1093 | links.appendChild(doc.createTextNode("\n")) |
1094 | spaths=entry[PATH].keys() |
1095 | spaths.sort() |
1096 | for path in spaths: |
1097 | info=entry[PATH][path] |
1098 | status=links.appendChild(doc.createElement("span")) |
1099 | status.setAttribute("class","cvslog2web_filestatus") |
1100 | if len(PERMALINK_STATUSICONS)==3: |
1101 | if info[2]==ADDED: permalinkStatusIcon(status, 0, "A") |
1102 | elif info[2]==MODIFIED: permalinkStatusIcon(status, 1, "M") |
1103 | elif info[2]==REMOVED: permalinkStatusIcon(status, 2, "R") |
1104 | else: sys.exit("cvslog2web: bad entry[PATH] status flag") |
1105 | else: |
1106 | if info[2]==ADDED: status.appendChild(doc.createTextNode("A")) |
1107 | elif info[2]==MODIFIED: status.appendChild(doc.createTextNode("M")) |
1108 | elif info[2]==REMOVED: status.appendChild(doc.createTextNode("R")) |
1109 | else: sys.exit("cvslog2web: bad entry[PATH] status flag") |
1110 | a=links.appendChild(doc.createElement("a")) |
1111 | lid=dict(path=path,filename=os.path.basename(path),oldversion=vers2str(info[0]),newversion=vers2str(info[1])) |
1112 | if lid["oldversion"] and lid["newversion"]: |
1113 | a.setAttribute("href",(DIFFLINKFORMAT % lid).replace(" ","%20")) |
1114 | else: |
1115 | a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20")) |
1116 | a.appendChild(doc.createTextNode(dropModules(path))) |
1117 | links.appendChild(doc.createElement("br")) |
1118 | links.appendChild(doc.createTextNode("\n")) |
1119 | if len(entry)>IMPORTEDFILES and len(entry)>IMPORTEDTAG: #just for backward compatability |
1120 | for f in entry[IMPORTEDFILES]: |
1121 | status=links.appendChild(doc.createElement("span")) |
1122 | status.setAttribute("class","cvslog2web_filestatus") |
1123 | status.appendChild(doc.createTextNode(f[0])) |
1124 | a=links.appendChild(doc.createElement("a")) |
1125 | lid=dict(path=f[1],filename=os.path.basename(f[1]),oldversion="",newversion=entry[IMPORTEDTAG]) |
1126 | a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20")) |
1127 | a.appendChild(doc.createTextNode(dropModules(f[1]))) |
1128 | links.appendChild(doc.createElement("br")) |
1129 | links.appendChild(doc.createTextNode("\n")) |
1130 | |
1131 | # Special content (user defined string?) |
1132 | elif section.__class__=="".__class__: |
1133 | if not section.strip().startswith("<"): |
1134 | # doesn't start with a tag, we append as text |
1135 | node.appendChild(doc.createTextNode(section)) |
1136 | else: |
1137 | # Have to parse it so we can append it |
1138 | # If we just appended it as text, all the good stuff would be escaped away |
1139 | subdoc=xml.dom.minidom.parseString(section) |
1140 | for n in subdoc.childNodes: |
1141 | node.appendChild(subdoc.removeChild(n)) |
1142 | subdoc.unlink() |
1143 | |
1144 | #put a return in the source after eact section |
1145 | node.appendChild(doc.createTextNode("\n")) |
1146 | |
1147 | if PERMALINK_STANDALONE: |
1148 | gen=rootdiv.parentNode.appendChild(doc.createElement("div")) |
1149 | else: |
1150 | gen=rootdiv.appendChild(doc.createElement("div")) |
1151 | |
1152 | gen.setAttribute("class","cvslog2web_credit") |
1153 | gen.appendChild(doc.createTextNode("Generated by ")) |
1154 | a=appendTextualTag(gen,"a","cvslog2web") |
1155 | a.setAttribute("target","_top") |
1156 | a.setAttribute("href",SCRIPT_URL) |
1157 | gen.appendChild(doc.createTextNode(SCRIPT_VERSION)) |
1158 | |
1159 | f=open(permafile,"wb") |
1160 | if PERMALINK_STANDALONE or "createDocumentFragment" in dir(dom): |
1161 | doc.writexml(f) |
1162 | else: |
1163 | # createDocumentFragment unavailable, hack it and strip the xml processing instruction |
1164 | s=doc.toxml() |
1165 | s=s[s.find("\n")+1:] |
1166 | f.write(s) |
1167 | f.close() |
1168 | doc.unlink() |
1169 | |
1170 | if VERBOSE: |
1171 | print "Permalink generated:", genPermalink(entry[DATETIME]) |
1172 | |
def genPermalinkEntry(i):
    """Permalink URL for history[i], or None when i is past the end."""
    if i>=len(history):
        return None
    return genPermalink(history[i][DATETIME])
1176 | |
# Drive permalink generation.  Normally only the newest page is written (plus
# a rewrite of the second-newest so its "previous" link points at the new
# page); with REBUILDPERMALINKS every page in the history is regenerated.
# Note the prev/next argument order: writePermalink(entry, files, links,
# prevLink=older, nextLink=newer).
if PERMALINKDIR:
    if REBUILDPERMALINKS:
        # Using this section will rewrite all the permalinks in the history (handy for development...)
        writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1))
        for i in range(1,len(history)-1):
            writePermalink(history[i],histFiles[i],histLinks[i],genPermalinkEntry(i+1),genPermalinkEntry(i-1))
        if len(sys.argv)<3 and len(history)>=MAXHISTORY:
            pass #don't do last permalink if it's a rebuild -- would break link chain because we don't have previous
        elif len(droppedHistory)==0:
            # oldest entry ever: no older page to link back to
            i=len(history)-1
            writePermalink(history[i],histFiles[i],histLinks[i],None,genPermalinkEntry(i-1))
        else:
            # link the oldest retained entry back to the newest dropped one
            i=len(history)-1
            writePermalink(history[i],histFiles[i],histLinks[i],genPermalink(droppedHistory[-1][DATETIME]),genPermalinkEntry(i-1))
    else:
        # write the new page, then refresh the previous newest so it links forward
        writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1))
        curpl=genPermalinkEntry(0)
        if len(history)>1:
            writePermalink(history[1],histFiles[1],histLinks[1],genPermalinkEntry(2),curpl)
1196 |