1. Java code (indexing)
package bindex;
import java.io.IOException;
import java.net.URL;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.LockObtainFailedException;
import org.htmlparser.Node;
import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.beans.LinkBean;
import org.htmlparser.filters.AndFilter;
import org.htmlparser.filters.HasAttributeFilter;
import org.htmlparser.filters.NotFilter;
import org.htmlparser.filters.OrFilter;
import org.htmlparser.filters.RegexFilter;
import org.htmlparser.filters.TagNameFilter;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;
public class perfieldindextest {

    public static void main(String[] args) {
        String indexpath = "./indexes";
        IndexWriter writer;
        PerFieldAnalyzerWrapper wr;
        Document doc;
        try {
            writer = new IndexWriter(indexpath, new StandardAnalyzer());
            // Per-field analyzers: Chinese segmentation (MMAnalyzer) for title/content/author,
            // StandardAnalyzer for everything else.
            wr = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
            wr.addAnalyzer("title", new MMAnalyzer());
            wr.addAnalyzer("content", new MMAnalyzer());
            wr.addAnalyzer("author", new MMAnalyzer());
            wr.addAnalyzer("time", new StandardAnalyzer());
            // Collect the links on the Tencent domestic news page
            LinkBean lb = new LinkBean();
            lb.setURL("http://news.qq.com/china_index.shtml");
            URL[] urls = lb.getLinks();
            for (int i = 0; i < urls.length; i++) {
                doc = new Document();
                String title = "";
                String content = "";
                String time = "";
                String author = "";
                System.out.println("正在提取網頁第" + i + "個鏈接(" + (int) (100 * (i + 1) / urls.length) + "%)[" + urls[i].toString() + "].....");
                // Only article pages start with http://news.qq.com/a/
                if (!(urls[i].toString().startsWith("http://news.qq.com/a/"))) {
                    System.out.println("非新聞鏈接,忽略......");
                    continue;
                }
                System.out.println("新聞鏈接,正在處理");
                Parser parser = new Parser(urls[i].toString());
                parser.setEncoding("GBK");
                String url = urls[i].toString();
                // Title: the <title> tag
                NodeFilter filter_title = new TagNameFilter("title");
                NodeList nodelist = parser.parse(filter_title);
                Node node_title = nodelist.elementAt(0);
                title = node_title.toPlainTextString();
                System.out.println("標題:" + title);
                parser.reset();
                // Author/source: elements with class "auth" or "where"
                NodeFilter filter_auth = new OrFilter(new HasAttributeFilter("class", "auth"),
                        new HasAttributeFilter("class", "where"));
                nodelist = parser.parse(filter_auth);
                Node node_auth = nodelist.elementAt(0);
                if (node_auth != null) author = node_auth.toPlainTextString();
                else author = "騰訊網";
                node_auth = nodelist.elementAt(1);
                if (node_auth != null) author += node_auth.toPlainTextString();
                System.out.println("作者:" + author);
                parser.reset();
                // Publication time: class "info" or a yyyy年MM月dd日 HH:mm pattern
                NodeFilter filter_time = new OrFilter(new HasAttributeFilter("class", "info"),
                        new RegexFilter("[0-9]{4}年[0-9]{1,2}月[0-9]{1,2}日[' ']*[0-9]{1,2}:[0-9]{1,2}"));
                nodelist = parser.parse(filter_time);
                Node node_time = nodelist.elementAt(0);
                if (node_time.getChildren() != null) node_time = node_time.getFirstChild();
                time = node_time.toPlainTextString().replaceAll("[ |\t|\n|\f|\r\u3000]", "").substring(0, 16);
                System.out.println("時間:" + time);
                parser.reset();
                // Article body: the container ids/styles used by the QQ news templates
                NodeFilter filter_content = new OrFilter(
                        new OrFilter(new HasAttributeFilter("style", "TEXT-INDENT: 2em"),
                                new HasAttributeFilter("id", "Cnt-Main-Article-QQ")),
                        new HasAttributeFilter("id", "ArticleCnt"));
                nodelist = parser.parse(filter_content);
                Node node_content = nodelist.elementAt(0);
                content = node_content.toPlainTextString()
                        .replaceAll("(#.*)|([a-z].*;)|}", "")
                        .replaceAll(" |\t|\r|\n|\u3000", "");
                System.out.println("內容:" + content);
                System.out.println("正在索引.....");
                Field field = new Field("title", title, Field.Store.YES, Field.Index.TOKENIZED);
                doc.add(field);
                field = new Field("content", content, Field.Store.YES, Field.Index.TOKENIZED);
                doc.add(field);
                field = new Field("author", author, Field.Store.YES, Field.Index.UN_TOKENIZED);
                doc.add(field);
                field = new Field("time", time, Field.Store.YES, Field.Index.NO);
                doc.add(field);
                field = new Field("url", url, Field.Store.YES, Field.Index.NO);
                doc.add(field);
                // Pass the per-field wrapper so each field is analyzed as configured above
                writer.addDocument(doc, wr);
                System.out.println("<" + title + "索引成功>");
            }
            writer.close();
            wr.close();
        } catch (ParserException e) {
            e.printStackTrace();
        } catch (CorruptIndexException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Author's blog: http://blog.163.com/sukerl@126/
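Before wiring up the servlet, it can help to confirm what actually landed in ./indexes. The class below is my own small check, not part of the original project: a minimal sketch that assumes the same Lucene 2.x-era API used above and simply dumps the stored fields of every document.

package bindex;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
// Hypothetical helper (not in the original article): dump the stored fields of
// every document in ./indexes to verify the crawl/index step.
public class IndexDumpTest {
    public static void main(String[] args) throws Exception {
        IndexReader reader = IndexReader.open("./indexes"); // same path the indexer wrote to
        for (int i = 0; i < reader.maxDoc(); i++) {
            if (reader.isDeleted(i)) continue;               // skip deleted slots
            Document doc = reader.document(i);
            System.out.println(doc.get("title"));
            System.out.println("  author: " + doc.get("author"));
            System.out.println("  time:   " + doc.get("time"));
            System.out.println("  url:    " + doc.get("url"));
        }
        System.out.println("total docs: " + reader.numDocs());
        reader.close();
    }
}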
Servlet code (search):
package bservlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.*;
import java.io.*;
import jeasy.analysis.MMAnalyzer;
public class SluceneSearcher extends HttpServlet {

    private String indexpath = "D:/workspace/testsearch2/indexes";

    public void doPost(HttpServletRequest request, HttpServletResponse response) {
        StringBuffer sb = new StringBuffer("");
        try {
            request.setCharacterEncoding("GBK");
            String phrase = request.getParameter("phrase");
            Analyzer analyzer = new MMAnalyzer();
            IndexSearcher searcher;
            searcher = new IndexSearcher(indexpath);
            QueryParser parser = new QueryParser("content", analyzer);
            Query q = parser.parse(phrase);
            Hits hs = searcher.search(q);
            int num = hs.length();
            sb.append("<h1>您搜索到的記錄數:" + num + "</h1>");
            for (int i = 0; i < num; i++) {
                Document doc = hs.doc(i);
                if (doc == null) {
                    continue;
                }
                Field field_title = doc.getField("title");
                String title = "<br><a href=" + doc.getField("url").stringValue() + " target='_blank'>" + field_title.stringValue() + "</a><br>";
                Field field_author = doc.getField("author");
                String author = "<br>author:<br>" + field_author.stringValue();
                Field field_time = doc.getField("time");
                String time = "<br>time:<br>" + field_time.stringValue();
                sb.append(title);
                sb.append(author);
                sb.append(time);
            }
            searcher.close();
        } catch (CorruptIndexException e1) {
            e1.printStackTrace();
        } catch (IOException e1) {
            e1.printStackTrace();
        } catch (ParseException e) {
            e.printStackTrace();
        }
        PrintWriter out;
        try {
            response.setContentType("text/html;charset=GBK");
            out = response.getWriter();
            out.print(sb.toString());
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void doGet(HttpServletRequest request, HttpServletResponse response) {
        doPost(request, response);
    }
}
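If you want to try a query before deploying to a servlet container, the same lookup can be run from the console. This is a throwaway sketch of mine, assuming the index path used earlier and that je-analysis (MMAnalyzer) is on the classpath; the class name and default keyword are only examples.

package bservlet;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
// Hypothetical console version of the servlet's search logic, handy for quick tests.
public class ConsoleSearchTest {
    public static void main(String[] args) throws Exception {
        String indexpath = "./indexes";                      // adjust to where the index was built
        String phrase = args.length > 0 ? args[0] : "武漢";  // example keyword
        Analyzer analyzer = new MMAnalyzer();                // same Chinese analyzer used at index time
        IndexSearcher searcher = new IndexSearcher(indexpath);
        Query q = new QueryParser("content", analyzer).parse(phrase);
        Hits hits = searcher.search(q);
        System.out.println("hits: " + hits.length());
        for (int i = 0; i < hits.length(); i++) {
            Document doc = hits.doc(i);
            System.out.println(doc.get("title") + "  [" + doc.get("url") + "]");
        }
        searcher.close();
    }
}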
WEB.XML:
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<web-app xmlns="http://java.sun.com/xml/ns/javaee"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
                             http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
         version="2.5">
<display-name>news-search</display-name>
<description>
news-search
</description>
<servlet>
<servlet-name>newssearch</servlet-name>
<servlet-class>bservlet.SluceneSearcher</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>newssearch</servlet-name>
<url-pattern>/deepfuturesou</url-pattern>
</servlet-mapping>
</web-app>
Note that deepfuturesou is a virtual path; do not actually create a directory with that name. It must, however, match the action specified in the search page and map to the corresponding servlet.
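Because doGet simply delegates to doPost, the mapping can also be smoke-tested with a plain GET once the app is deployed. The snippet below is only an illustration: the port and the testsearch2 context path are assumptions, and since query-string decoding is governed by the container rather than by request.setCharacterEncoding, Chinese keywords are more reliably submitted through the POST form on the search page shown below.

package bservlet;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLEncoder;
// Hypothetical smoke test: checks that /deepfuturesou answers and prints the returned HTML.
public class MappingSmokeTest {
    public static void main(String[] args) throws Exception {
        String phrase = URLEncoder.encode("武漢", "GBK");  // example keyword; GET decoding depends on the container
        URL url = new URL("http://localhost:8080/testsearch2/deepfuturesou?phrase=" + phrase);
        BufferedReader in = new BufferedReader(
                new InputStreamReader(url.openStream(), "GBK")); // servlet responds with text/html;charset=GBK
        String line;
        while ((line = in.readLine()) != null) {
            System.out.println(line);
        }
        in.close();
    }
}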
Search page:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=gb2312" />
<title>騰訊國內新聞搜索</title>
</head>
<body>
<form id="form1" name="form1" method="post" action="deepfuturesou">
搜索關鍵字
<input name="phrase" type="text" id="phrase" />
<input type="submit" name="Submit" value="搜索" />
</form>
</body>
</html>
2. Results (searching QQ domestic news)
正在提取網頁第0個鏈接(0%)[http://news.qq.com/china_index.shtml#].....
非新聞鏈接,忽略......
正在提取網頁第1個鏈接(1%)[http://3g.qq.com].....
非新聞鏈接,忽略......
正在提取網頁第2個鏈接(1%)[http://www.qq.com].....
非新聞鏈接,忽略......
正在提取網頁第3個鏈接(2%)[http://news.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第4個鏈接(3%)[http://news.qq.com/photo.shtml].....
非新聞鏈接,忽略......
正在提取網頁第5個鏈接(3%)[http://news.qq.com/scroll/now.htm].....
非新聞鏈接,忽略......
正在提取網頁第6個鏈接(4%)[http://news.qq.com/paihang.htm].....
非新聞鏈接,忽略......
正在提取網頁第7個鏈接(5%)[http://news.qq.com/china_index.shtml].....
非新聞鏈接,忽略......
正在提取網頁第8個鏈接(5%)[http://news.qq.com/world_index.shtml].....
非新聞鏈接,忽略......
正在提取網頁第9個鏈接(6%)[http://news.qq.com/society_index.shtml].....
非新聞鏈接,忽略......
正在提取網頁第10個鏈接(6%)[http://report.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第11個鏈接(7%)[http://news.qq.com/military.shtml].....
非新聞鏈接,忽略......
正在提取網頁第12個鏈接(8%)[http://view.news.qq.com/index/zhuanti/zt_more.htm].....
非新聞鏈接,忽略......
正在提取網頁第13個鏈接(8%)[http://view.news.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第14個鏈接(9%)[http://news.qq.com/topic/feature.htm].....
非新聞鏈接,忽略......
正在提取網頁第15個鏈接(10%)[http://blog.qq.com/news/].....
非新聞鏈接,忽略......
正在提取網頁第16個鏈接(10%)[http://news.qq.com/photon/videonews/morevideo.htm].....
非新聞鏈接,忽略......
正在提取網頁第17個鏈接(11%)[http://bj.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第18個鏈接(11%)[http://sh.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第19個鏈接(12%)[http://gd.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第20個鏈接(13%)[http://cq.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第21個鏈接(13%)[http://xian.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第22個鏈接(14%)[http://cd.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第23個鏈接(15%)[http://js.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第24個鏈接(15%)[http://zj.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第25個鏈接(16%)[http://sd.qq.com/].....
非新聞鏈接,忽略......
正在提取網頁第26個鏈接(16%)[http://news.qq.com/{clickurl}].....
非新聞鏈接,忽略......
正在提取網頁第27個鏈接(17%)[http://news.qq.com/{clickurl}].....
非新聞鏈接,忽略......
正在提取網頁第28個鏈接(18%)[http://news.qq.com/{clickurl}].....
非新聞鏈接,忽略......
正在提取網頁第29個鏈接(18%)[http://news.qq.com/china_index.shtml#].....
非新聞鏈接,忽略......
正在提取網頁第30個鏈接(19%)[http://news.qq.com/a/20091127/000644.htm].....
新聞鏈接,正在處理
標題:組圖:武漢東湖上千萬搖蚊引發多起車禍_新聞國內_新聞_騰訊網
作者:中國新聞網
時間:2009年11月27日10:00
內容:中&白色大理石護欄被搖蚊“刷黑”。中新社發楚天行攝functionSplitPages(name,pageID,listID){SplitPages.prototype.checkPages=function(){SplitPages.prototype.createHtml=function(mode){if(this.pageCount>this.page+2){else{i++){if(i>0){if(i==this.page){else{if(i!=1&&i!=this.pageCount){SplitPages.prototype.Output=function(mode){SplitPages.prototype.setPage=function(mode){$(window.onload=function(){varimgsimgs=$("imgsimgs")changeImg(imgsimgs)近日由于氣溫上升,武漢東湖沙灘浴場附近的環湖路上落下大量搖蚊,過往汽車碾壓后,成“油垢”致路面異常光滑,引發多起車禍。2009年11月24日7時許,一輛黑色轎車,在東湖沙灘浴場旁的彎道處突然失控,撞到路旁的石頭上,車頭面目全非。這是當天早晨在這里發生的第4起,一輛汽車還將一棵臉盆粗的大樹撞到湖里。另外還有5、6輛摩托車也在這里滑倒。東湖環衛管理處派出職工,用高壓水槍來清洗路面的“油垢”,以防汽車打滑。[責任編輯:morganli]
正在索引.....
<組圖:武漢東湖上千萬搖蚊引發多起車禍_新聞國內_新聞_騰訊網索引成功>
正在提取網頁第31個鏈接(20%)[http://news.qq.com/a/20091127/000644.htm].....