Mercurial > p > roundup > code
view website/issues/extensions/spambayes.py @ 6593:e70e2789bc2c
issue2551189 - increase text search maxlength
This removes, I think, all the magic references to 25 and 30 (varchar
size) and replaces them with references to maxlength or maxlength+5.
I am not sure why the db column is 5 characters larger than the size
of what should be the max size of a word, but I'll keep the buffer
of 5 as making it 1/5 the size of maxlength makes less sense.
Also added tests for fts search in templating which were missing.
Added postgres, mysql and sqlite native indexing backends in which to
test fts. Added fts test to native-fts as well to make sure it's
working.
I want to commit this now for CI.
Todo:
add test cases for the use of FTS in the csv output in
actions.py. There is no test coverage of the match case there.
change maxlength to a higher value (50) as requested in the ticket.
Modify existing extremewords test cases to allow words > 25 and < 51
write code to migrate column sizes for mysql and postgresql to match
maxlength I will roll this into the version 7 schema update that
supports use of database fts support.
| author | John Rouillard <rouilj@ieee.org> |
|---|---|
| date | Tue, 25 Jan 2022 13:22:00 -0500 |
| parents | e46ce04d5bbc |
| children |
line wrap: on
line source
import re
import math
import socket

from roundup.cgi.actions import Action
from roundup.cgi.exceptions import *
from roundup.anypy import xmlrpc_

# Matches revision references such as "r123", "rev 123" or "revision 123"
# in message text; their presence is fed to the classifier as a token.
REVPAT = re.compile(r'(r[0-9]+\b|rev(ision)? [0-9]+\b)')


def extract_classinfo(db, classname, nodeid):
    """Collect the text content and SpamBayes tokens for one tracker node.

    Returns a ``(content, tokens)`` tuple: *content* is the node's text
    and *tokens* describe the class, the author, the (log of the) age of
    the author's account at posting time, and whether the text mentions
    a revision number.
    """
    node = db.getnode(classname, nodeid)
    # Prefer 'author' (messages); fall back to 'creator' for other classes.
    # Hoisted: the original computed this lookup twice.
    authorid = node.get('author', node.get('creator'))
    # Seconds between the author's account creation and this node's creation.
    authorage = node['creation'].timestamp() - \
        db.getnode('user', authorid)['creation'].timestamp()
    content = db.getclass(classname).get(nodeid, 'content')
    tokens = ["klass:%s" % classname,
              "author:%s" % authorid,
              "authorage:%d" % int(math.log(authorage)),
              "hasrev:%s" % (REVPAT.search(content) is not None)]
    return (content, tokens)


def train_spambayes(db, content, tokens, is_spam):
    """Send one training example to the SpamBayes XML-RPC server.

    Returns ``(True, None)`` on success or ``(False, errmsg)`` when the
    server is unreachable or reports an XML-RPC fault.
    """
    spambayes_uri = db.config.detectors['SPAMBAYES_URI']
    server = xmlrpc_.client.ServerProxy(spambayes_uri, verbose=False)
    try:
        server.train({'content': content}, tokens, {}, is_spam)
        return (True, None)
    except (socket.error, xmlrpc_.client.Error) as e:
        return (False, str(e))


class SpambayesClassify(Action):
    """Web action: train SpamBayes with the current item as spam or ham."""

    permissionType = 'SB: May Classify'

    def handle(self):
        """Train the classifier from the submitted form and update the node.

        Expects exactly one of ``trainspam`` / ``trainham`` in the form.
        """
        (content, tokens) = extract_classinfo(self.db, self.classname,
                                              self.nodeid)
        if "trainspam" in self.form:
            is_spam = True
        elif "trainham" in self.form:
            is_spam = False
        else:
            # BUG FIX: is_spam was unbound (NameError) when the form
            # carried neither button; fail with a clear message instead.
            raise ValueError(
                "form must contain either 'trainspam' or 'trainham'")
        (status, errmsg) = train_spambayes(self.db, content, tokens, is_spam)
        node = self.db.getnode(self.classname, self.nodeid)
        props = {}
        if status:
            # NOTE(review): this only re-asserts a flag that is already
            # True — it may have been meant to fire when a score already
            # exists; confirm the intended condition.
            if node.get('spambayes_misclassified', False):
                props['spambayes_misclassified'] = True
            props['spambayes_score'] = 1.0
            s = " SPAM"
            if not is_spam:
                props['spambayes_score'] = 0.0
                s = " HAM"
            self.client.add_ok_message(self._('Message classified as') + s)
        else:
            self.client.add_error_message(
                self._('Unable to classify message, got error:') + errmsg)
        klass = self.db.getclass(self.classname)
        klass.set(self.nodeid, **props)
        self.db.commit()


def sb_is_spam(obj):
    """Templating utility: True if *obj*'s spambayes score >= the cutoff."""
    cutoff_score = float(obj._db.config.detectors['SPAMBAYES_SPAM_CUTOFF'])
    try:
        score = obj['spambayes_score']
    except KeyError:
        # Item has never been scored: treat it as ham.
        return False
    return score >= cutoff_score


def init(instance):
    """Register the classify action and templating utility with the tracker."""
    instance.registerAction("spambayes_classify", SpambayesClassify)
    instance.registerUtil('sb_is_spam', sb_is_spam)