Lucene.Net is the .NET port of Lucene, an open-source full-text search development kit. It is not a complete search engine but the architecture of one: a library. You can also think of it as a well-encapsulated, easy-to-use API for indexing and searching (it provides a complete query engine and index engine). With this API you can build almost anything search-related quite conveniently, and developers can implement full-text search on top of it.
Note: Lucene.Net can only search text. Anything that is not text must first be converted to text; for example, to search an Excel file you would read it into a string with NPOI and hand that string to Lucene.Net. Lucene.Net tokenizes whatever text it is given and stores the terms, which is what makes retrieval fast.
For more background, see this post: http://blog.csdn.net/xiucool/archive/2008/11/28/3397182.aspx
What this small demo shows:
The core of learning Lucene.Net is tokenization (analysis). Ideally you would write your own analyzer, but that demands serious algorithmic skill. In Lucene.Net each tokenization algorithm is a separate class, and every one of them inherits from Analyzer; each has its own strengths and weaknesses.
// StandardAnalyzer: splits English on whitespace/punctuation and splits Chinese into single characters
Analyzer analyzer = new StandardAnalyzer();
TokenStream tokenStream = analyzer.TokenStream("", new StringReader("Hello Lucene.Net,我1爱1你China"));
Lucene.Net.Analysis.Token token = null;
while ((token = tokenStream.Next()) != null)
{
    Console.WriteLine(token.TermText());
}
// CJKAnalyzer: bigram segmentation, every two adjacent Chinese characters become a term
Analyzer analyzer = new CJKAnalyzer();
TokenStream tokenStream = analyzer.TokenStream("", new StringReader("我爱你中国China中华人民共和国"));
Lucene.Net.Analysis.Token token = null;
while ((token = tokenStream.Next()) != null)
{
    Response.Write(token.TermText() + "<br/>");
}
At this point you are probably thinking that none of the analyzers above is really usable; bigram segmentation in particular is a shotgun approach. You would like to extend Analyzer yourself, but you are no algorithm specialist. So what do you do?
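In practice the usual answer is the open-source PanGu (盘古) analyzer, which is exactly what the indexing code below passes to IndexWriter as PanGuAnalyzer. A minimal sketch of tokenizing a sample sentence with it, assuming the PanGu.dll / PanGu.Lucene.Analyzer assemblies and their dictionary files are referenced, follows the same TokenStream pattern as the demos above:
// Sketch: dictionary-based Chinese segmentation with PanGuAnalyzer
// (assumes PanGu.dll, PanGu.Lucene.Analyzer.dll and the PanGu dictionaries are in place)
Analyzer analyzer = new PanGuAnalyzer();
TokenStream tokenStream = analyzer.TokenStream("", new StringReader("我爱你中国China中华人民共和国"));
Lucene.Net.Analysis.Token token = null;
while ((token = tokenStream.Next()) != null)
{
    Console.WriteLine(token.TermText());
}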
Creating the index library:
private void CreateIndex()
{
    // The index files are stored in this folder
    string indexPath = ConfigurationManager.AppSettings["pathIndex"];
    // Directory is the abstract base for where index files live: FSDirectory stores them on disk, RAMDirectory in memory
    FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NativeFSLockFactory());
    // Check whether the folder already contains an index
    bool isUpdate = IndexReader.IndexExists(directory);
    logger.Debug("Index exists: " + isUpdate);
    if (isUpdate)
    {
        // A crashed writer may have left the index locked; unlock it before opening a new writer
        if (IndexWriter.IsLocked(directory))
        {
            IndexWriter.Unlock(directory);
        }
    }
    // The third parameter (create) decides whether a new index is created:
    // true overwrites any existing index files, false opens the existing index for updating
    IndexWriter writer = new IndexWriter(directory, new PanGuAnalyzer(), !isUpdate, IndexWriter.MaxFieldLength.UNLIMITED);
    WebClient wc = new WebClient();
    // Set the encoding to avoid garbled characters
    wc.Encoding = Encoding.UTF8;
    int maxID;
    try
    {
        // Read the RSS feed; the link of the first item contains the highest topic number
        maxID = GetMaxID();
    }
    catch (WebException webEx)
    {
        logger.Error("Failed to get the highest topic number", webEx);
        return;
    }
    for (int i = 1; i <= maxID; i++)
    {
        try
        {
            string url = "http://localhost:8080/showtopic-" + i + ".aspx";
            logger.Debug("Downloading: " + url);
            string html = wc.DownloadString(url);
            HTMLDocumentClass doc = new HTMLDocumentClass();
            doc.designMode = "on"; // design mode keeps the parser from executing scripts
            doc.IHTMLDocument2_write(html);
            doc.close();
            string title = doc.title;
            string body = doc.body.innerText;
            // To avoid duplicate documents, delete any existing record with number = i, then re-add it
            writer.DeleteDocuments(new Term("number", i.ToString()));
            Document document = new Document();
            // Field is a column of the document; only fields used for full-text search are analyzed. Field.Store controls whether the value is stored
            document.Add(new Field("number", i.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            document.Add(new Field("title", title, Field.Store.YES, Field.Index.NOT_ANALYZED));
            document.Add(new Field("body", body, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
            writer.AddDocument(document);
            logger.Debug("Indexed " + i.ToString());
        }
        catch (WebException webEx)
        {
            logger.Error("Failed to download " + i.ToString(), webEx);
        }
    }
    writer.Close();
    directory.Close();
    logger.Debug("Indexing finished");
}
// Get the highest topic number from the RSS feed
private int GetMaxID()
{
    XDocument xdoc = XDocument.Load("http://localhost:8080/tools/rss.aspx");
    XElement channel = xdoc.Root.Element("channel");
    XElement firstItem = channel.Elements("item").First();
    XElement link = firstItem.Element("link");
    Match match = Regex.Match(link.Value, @"http://localhost:8080/showtopic-(\d+)\.aspx");
    string id = match.Groups[1].Value;
    return Convert.ToInt32(id);
}
This builds the index library by crawling every page with WebClient. You need to add a reference to the Microsoft mshtml COM component: MSHTML wraps every HTML element and its attributes, and through its standard interfaces you can access any element of a given page.
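If you want that extraction in one reusable place, here is a minimal sketch wrapping the same mshtml calls used in CreateIndex above (the class and method names are just illustrations, not part of the original code):
using mshtml; // COM reference: Microsoft HTML Object Library

public static class HtmlTextExtractor
{
    // Hypothetical helper: parse the HTML in design mode (so scripts are not executed)
    // and return the page title plus the plain text of the body.
    public static void GetTitleAndBody(string html, out string title, out string body)
    {
        HTMLDocumentClass doc = new HTMLDocumentClass();
        doc.designMode = "on";          // design mode keeps the parser from running scripts
        doc.IHTMLDocument2_write(html); // feed the downloaded HTML into the document
        doc.close();
        title = doc.title;
        body = doc.body.innerText;
    }
}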
Of course, it is best to have the index rebuilt automatically on a schedule, much like a Windows scheduled task.
This is where Quartz.Net comes in:
public class Global : System.Web.HttpApplication
{
    private static ILog logger = LogManager.GetLogger(typeof(Global));
    private IScheduler sched;

    protected void Application_Start(object sender, EventArgs e)
    {
        // In a console application this setup would go in Main
        logger.Debug("Application_Start");
        log4net.Config.XmlConfigurator.Configure();
        // Read the job start time from configuration
        int indexStartHour = Convert.ToInt32(ConfigurationManager.AppSettings["IndexStartHour"]);
        int indexStartMin = Convert.ToInt32(ConfigurationManager.AppSettings["IndexStartMin"]);
        ISchedulerFactory sf = new StdSchedulerFactory();
        sched = sf.GetScheduler();
        JobDetail job = new JobDetail("job1", "group1", typeof(IndexJob)); // IndexJob implements the IJob interface
        Trigger trigger = TriggerUtils.MakeDailyTrigger("tigger1", indexStartHour, indexStartMin); // fires daily at the configured hour and minute
        trigger.JobName = "job1";
        trigger.JobGroup = "group1";
        trigger.Group = "group1";
        sched.AddJob(job, true);
        sched.ScheduleJob(trigger);
        // Application_Start only runs once per application lifetime
        sched.Start();
    }

    protected void Session_Start(object sender, EventArgs e)
    {
    }

    protected void Application_BeginRequest(object sender, EventArgs e)
    {
    }

    protected void Application_AuthenticateRequest(object sender, EventArgs e)
    {
    }

    protected void Application_Error(object sender, EventArgs e)
    {
        logger.Debug("Unhandled exception in the site:", HttpContext.Current.Server.GetLastError());
    }

    protected void Session_End(object sender, EventArgs e)
    {
    }

    protected void Application_End(object sender, EventArgs e)
    {
        logger.Debug("Application_End");
        sched.Shutdown(true);
    }
}
public class IndexJob : IJob
{
    private ILog logger = LogManager.GetLogger(typeof(IndexJob));

    public void Execute(JobExecutionContext context)
    {
        try
        {
            logger.Debug("Indexing started");
            CreateIndex();
            logger.Debug("Indexing finished");
        }
        catch (Exception ex)
        {
            logger.Debug("Index job threw an exception", ex);
        }
    }
}
OK, our index library is built; next comes the search side.
The search code:
// Build a highlighted preview: PanGu.HighLight picks the best fragment of body
// that contains the keyword and wraps each hit in a red <font> tag
private string Preview(string body, string keyword)
{
    PanGu.HighLight.SimpleHTMLFormatter simpleHTMLFormatter = new PanGu.HighLight.SimpleHTMLFormatter("<font color=\"Red\">", "</font>");
    PanGu.HighLight.Highlighter highlighter = new PanGu.HighLight.Highlighter(simpleHTMLFormatter, new Segment());
    highlighter.FragmentSize = 100;
    string bodyPreview = highlighter.GetBestFragment(keyword, body);
    return bodyPreview;
}
public IEnumerable<Model.SearchSum> GetHotWords()
{
    // Cache the hot-word list so the database is not queried on every request
    var data = HttpRuntime.Cache["hotwords"];
    if (data == null)
    {
        IEnumerable<Model.SearchSum> hotWords = DoSelect();
        // Absolute expiration of 30 minutes, no sliding expiration
        HttpRuntime.Cache.Insert("hotwords", hotWords, null, DateTime.Now.AddMinutes(30), TimeSpan.Zero);
        return hotWords;
    }
    return (IEnumerable<Model.SearchSum>)data;
}
private IEnumerable<Model.SearchSum> DoSelect()
{
    // Top 5 keywords searched within the last 7 days
    DataTable dt = SqlHelper.ExecuteDataTable(@"
        select top 5 Keyword, count(*) as searchcount from keywords
        where datediff(day, searchdatetime, getdate()) < 7
        group by Keyword
        order by count(*) desc");
    List<Model.SearchSum> list = new List<Model.SearchSum>();
    if (dt != null && dt.Rows != null && dt.Rows.Count > 0)
    {
        foreach (DataRow row in dt.Rows)
        {
            Model.SearchSum oneModel = new Model.SearchSum();
            oneModel.Keyword = Convert.ToString(row["keyword"]);
            oneModel.SearchCount = Convert.ToInt32(row["SearchCount"]);
            list.Add(oneModel);
        }
    }
    return list;
}
public IEnumerable<Model.SearchSum> GetSuggestion(string kw)
{
    // Top 5 keywords from the last 7 days that contain the typed text
    DataTable dt = SqlHelper.ExecuteDataTable(@"
        select top 5 Keyword, count(*) as searchcount from keywords
        where datediff(day, searchdatetime, getdate()) < 7
        and keyword like @keyword
        group by Keyword
        order by count(*) desc", new SqlParameter("@keyword", "%" + kw + "%"));
    List<Model.SearchSum> list = new List<Model.SearchSum>();
    if (dt != null && dt.Rows != null && dt.Rows.Count > 0)
    {
        foreach (DataRow row in dt.Rows)
        {
            Model.SearchSum oneModel = new Model.SearchSum();
            oneModel.Keyword = Convert.ToString(row["keyword"]);
            oneModel.SearchCount = Convert.ToInt32(row["SearchCount"]);
            list.Add(oneModel);
        }
    }
    return list;
}
protected void Page_Load(object sender, EventArgs e)
{
    // Bind the hot-word list
    hotwordsRepeater.DataSource = new Dao.KeywordDao().GetHotWords();
    hotwordsRepeater.DataBind();
    kw = Request["kw"];
    if (string.IsNullOrWhiteSpace(kw))
    {
        return;
    }
    // Record the user's search keyword in the database so hot words can be computed later
    Model.SerachKeyword model = new Model.SerachKeyword();
    model.Keyword = kw;
    model.SearchDateTime = DateTime.Now;
    model.ClinetAddress = Request.UserHostAddress;
    new Dao.KeywordDao().Add(model);
    // Paging control
    MyPage pager = new MyPage();
    pager.TryParseCurrentPageIndex(Request["pagenum"]);
    // href format for the pager links
    pager.UrlFormat = "CreateIndex.aspx?pagenum={n}&kw=" + Server.UrlEncode(kw);
    int startRowIndex = (pager.CurrentPageIndex - 1) * pager.PageSize;
    int totalCount = -1;
    List<SearchResult> list = DoSearch(startRowIndex, pager.PageSize, out totalCount);
    pager.TotalCount = totalCount;
    RenderToHTML = pager.RenderToHTML();
    dataRepeater.DataSource = list;
    dataRepeater.DataBind();
}
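MyPage above is a custom paging helper whose source is not included in this post. A hypothetical minimal version, with property and method names inferred from how Page_Load uses it, might look like this:
using System;
using System.Text;

public class MyPage
{
    public int CurrentPageIndex { get; private set; }
    public int PageSize { get; set; }
    public int TotalCount { get; set; }
    public string UrlFormat { get; set; }

    public MyPage()
    {
        CurrentPageIndex = 1;
        PageSize = 10;
    }

    // Parse the page number from the query string; fall back to page 1
    public void TryParseCurrentPageIndex(string pageNum)
    {
        int index;
        if (int.TryParse(pageNum, out index) && index > 0)
        {
            CurrentPageIndex = index;
        }
    }

    // Render numbered page links, substituting {n} in UrlFormat with the page number
    public string RenderToHTML()
    {
        int pageCount = (int)Math.Ceiling(TotalCount / (double)PageSize);
        StringBuilder sb = new StringBuilder();
        for (int n = 1; n <= pageCount; n++)
        {
            if (n == CurrentPageIndex)
            {
                sb.AppendFormat("<span>{0}</span> ", n);
            }
            else
            {
                sb.AppendFormat("<a href=\"{0}\">{1}</a> ", UrlFormat.Replace("{n}", n.ToString()), n);
            }
        }
        return sb.ToString();
    }
}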
private List<SearchResult> DoSearch(int startRowIndex, int pageSize, out int totalCount)
{
    string indexPath = "C:/Index";
    FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NoLockFactory());
    IndexReader reader = IndexReader.Open(directory, true);
    // IndexSearcher performs the actual search
    IndexSearcher searcher = new IndexSearcher(reader);
    PhraseQuery query = new PhraseQuery();
    // Split the keyword with the same analyzer used for indexing and add each term to the phrase query
    foreach (string word in CommonHelper.SplitWord(kw))
    {
        query.Add(new Term("body", word));
    }
    query.SetSlop(100); // terms must occur within 100 positions of each other to count as a match
    TopScoreDocCollector collector = TopScoreDocCollector.create(1024, true); // collect at most 1024 hits
    searcher.Search(query, null, collector);
    totalCount = collector.GetTotalHits(); // total number of hits
    ScoreDoc[] docs = collector.TopDocs(startRowIndex, pageSize).scoreDocs; // paging; startRowIndex is zero-based
    List<SearchResult> list = new List<SearchResult>();
    for (int i = 0; i < docs.Length; i++)
    {
        // The hit list only contains document IDs (assigned by Lucene.Net), which keeps memory usage low;
        // fetch the full Document by its ID when needed
        int docID = docs[i].doc;
        Document doc = searcher.Doc(docID);
        string number = doc.Get("number");
        string title = doc.Get("title");
        string body = doc.Get("body");
        SearchResult searchResult = new SearchResult() { Number = number, Title = title, BodyPreview = Preview(body, kw) };
        list.Add(searchResult);
    }
    return list;
}
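DoSearch relies on CommonHelper.SplitWord, which is not shown in the post. A minimal sketch that reuses the PanGuAnalyzer/TokenStream pattern from the tokenization demos at the top (the class and method names match the call above; the implementation itself is an assumption):
using System.Collections.Generic;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.PanGu;

public static class CommonHelper
{
    // Split the search keyword with the same PanGu analyzer used when indexing,
    // so the phrase-query terms line up with the indexed terms
    public static IEnumerable<string> SplitWord(string input)
    {
        List<string> words = new List<string>();
        Analyzer analyzer = new PanGuAnalyzer();
        TokenStream tokenStream = analyzer.TokenStream("", new StringReader(input));
        Lucene.Net.Analysis.Token token = null;
        while ((token = tokenStream.Next()) != null)
        {
            words.Add(token.TermText());
        }
        return words;
    }
}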
<script type="text/javascript">
$(function () {
$("#txtKeyword").autocomplete(
{ source: "SearchSuggestion.ashx",
select: function (event, ui) { $("#txtKeyword").val(ui.item.value); $("#form1").submit(); }
});
});
</script>
<div align="center">
<input type="text" id="txtKeyword" name="kw" value='<%=kw %>'/>
<%-- <asp:Button ID="createIndexButton" runat="server" onclick="searchButton_Click"
Text="创建索引库" />--%>
<input type="submit" name="searchButton" value="搜索" style="width: 91px" /><br />
</div>
<br />
<ul id="hotwordsUL">
<asp:Repeater ID="hotwordsRepeater" runat="server">
<ItemTemplate>
<li><a href='CreateIndex.aspx?kw=<%#Eval("Keyword") %>'><%#Eval("Keyword") %></a></li>
</ItemTemplate>
</asp:Repeater>
</ul>
<br />
<asp:Repeater ID="dataRepeater" runat="server" EnableViewState="true">
<HeaderTemplate>
<ul>
</HeaderTemplate>
<ItemTemplate>
<li>
<a href='http://localhost:8080/showtopic-<%#Eval("Number") %>.aspx'><%#Eval("Title") %></a>
<br />
<%#Eval("BodyPreview") %>
</li>
</ItemTemplate>
<FooterTemplate>
</ul>
</FooterTemplate>
</asp:Repeater>
<br />
<div class="pager"><%=RenderToHTML%></div>
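The autocomplete box above posts to SearchSuggestion.ashx, which is not shown in the post. A minimal sketch of such a handler, assuming jQuery UI autocomplete's default contract (query-string parameter "term", JSON array response) and that GetSuggestion lives on Dao.KeywordDao like GetHotWords does:
using System.Linq;
using System.Web;
using System.Web.Script.Serialization;

public class SearchSuggestion : IHttpHandler
{
    public void ProcessRequest(HttpContext context)
    {
        context.Response.ContentType = "application/json";
        // jQuery UI autocomplete sends the typed text as the "term" parameter
        string term = context.Request["term"];
        var keywords = new Dao.KeywordDao().GetSuggestion(term)
                                           .Select(s => s.Keyword)
                                           .ToList();
        // Return a plain JSON array of keyword strings
        context.Response.Write(new JavaScriptSerializer().Serialize(keywords));
    }

    public bool IsReusable
    {
        get { return false; }
    }
}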
Oddly enough, a year ago my Weibo signature read "July having 31 days is the greatest kindness shown to me." Now, at this moment, I am just as restless and at a loss as I was back then, and it is July 31 again, so familiar. This summer of my junior year, as usual, I sit quietly and watch the dawn arrive, so calm and peaceful. In twenty days I will send out my first résumé; it has all come so fast I hardly know what to do. Perhaps next July 31 will not be this sad. It reminds me of how Total Soccer described Henry: go back to London, where the trains to Highbury run one after another; this place always sees off the past and welcomes the new; 32-year-old Henry sits there, his affectionate gaze reaching into the distance, everywhere the shadow of his 22-year-old self...
Click the attachment to download: