/**
 * Reads the current row of {@code resultSet} into a column-name-to-value map.
 * <p>
 * When {@code convertType} is {@code false} the underlying driver's own type
 * mapping is used via {@code ResultSet.getObject}. Otherwise each column is
 * fetched through the typed getter selected by {@code fieldNameVsType}
 * (defaulting to {@code Types.VARCHAR}, i.e. {@code getString}).
 *
 * @return a map of column name to value for the current row, or {@code null}
 *         if {@code resultSet} is {@code null}
 */
private Map<String, Object> getARow() {
if (resultSet == null)
return null;
Map<String, Object> result = new HashMap<String, Object>();
for (String colName : colNames) {
try {
if (!convertType) {
// Use underlying database's type information
result.put(colName, resultSet.getObject(colName));
continue;
}
Integer type = fieldNameVsType.get(colName);
if (type == null)
type = Types.VARCHAR;
// NOTE(review): primitive getters (getInt/getFloat/...) return 0/false for
// SQL NULL and wasNull() is never checked, so NULL columns surface as
// 0/false rather than null here — confirm this is the intended behavior.
switch (type) {
case Types.INTEGER:
result.put(colName, resultSet.getInt(colName));
break;
case Types.FLOAT:
result.put(colName, resultSet.getFloat(colName));
break;
case Types.BIGINT:
result.put(colName, resultSet.getLong(colName));
break;
case Types.DOUBLE:
result.put(colName, resultSet.getDouble(colName));
break;
case Types.DATE:
result.put(colName, resultSet.getDate(colName));
break;
case Types.BOOLEAN:
result.put(colName, resultSet.getBoolean(colName));
break;
case Types.BLOB:
result.put(colName, resultSet.getBytes(colName));
break;
default:
// Any unmapped type falls back to its string representation.
result.put(colName, resultSet.getString(colName));
break;
}
} catch (SQLException e) {
// wrapAndThrow rethrows, so the loop terminates on the first failed column.
logError("Error reading data ", e);
wrapAndThrow(SEVERE, e, "Error reading data from database");
}
}
return result;
}
/**
 * Collects the column labels of a result set, in column order.
 *
 * @param metaData metadata of the result set being read
 * @return the column labels ({@code getColumnLabel} honors SQL {@code AS}
 *         aliases, falling back to the column name)
 * @throws SQLException if the metadata cannot be read
 */
private List<String> readFieldNames(ResultSetMetaData metaData)
throws SQLException {
int count = metaData.getColumnCount();
List<String> colNames = new ArrayList<String>(count);
// JDBC column indices are 1-based.
for (int i = 1; i <= count; i++) {
colNames.add(metaData.getColumnLabel(i));
}
return colNames;
}
rSetIterator 是一个 Iterator,作为数据迭代器,其定义如下:
// Anonymous Iterator over result-set rows: each next() materializes exactly
// one row, so data is streamed lazily instead of loaded into memory at once.
rSetIterator = new Iterator() {
public boolean hasNext() {
// Delegates to the enclosing class's hasnext(); presumably it checks/advances
// the underlying ResultSet — TODO confirm against the full class.
return hasnext();
}
public Map next() {
// Reads the current row into a column-name -> value map.
return getARow();
}
public void remove() {/* do nothing */
// Removal is intentionally unsupported: a silent no-op rather than throwing.
}
};
从这里可以看出,Solr 自带的数据导入是采取迭代器的方式导入数据的,以防止数据表数据量过大时出现 OutOfMemoryError 异常。
网上推荐的采取类似分页的方式读取数据、再添加到 Solr 索引库的做法,个人感觉比较拙劣。当我们以编程方式从数据库读取数据并添加到 Solr 索引库时,可以参考这种迭代器方式,采用原始的 JDBC 数据访问;有时间我再贴出来分享。