2) public void offerService();// Starts the JobTracker service (spins up its various internal threads).
3) public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status, boolean restarted,boolean initialContact,
boolean acceptNewTasks, short responseId) throws IOException {}// Handles a heartbeat sent by a TaskTracker: first checks whether that TaskTracker is on the node blacklist, then checks whether the TaskTracker is healthy, ... and finally delivers the assigned tasks back to the TaskTracker through the heartbeat response.
五,部分相关属性
1) Map<JobID, JobInProgress> jobs =
Collections.synchronizedMap(new TreeMap<JobID, JobInProgress>());
// Stores the jobs submitted by clients; the JobTracker creates one JobInProgress object per submitted job to track it. Tracking uses a three-level multiway tree: one JobInProgress owns multiple TaskInProgress objects, and each TaskInProgress owns multiple Task Attempts.
2) TreeMap<String, ArrayList<JobInProgress>> userToJobsMap =
new TreeMap<String, ArrayList<JobInProgress>>();// Maps each user name to that user's jobs; a Hadoop cluster may have multiple user accounts.
六,jobtracker 任务分配
jobtracker 从正在运行的job任务中选取job,分配task前jobtracker 会判断申请任务的tasktracker 是否还有能力去执行新任务。如果可以执行,jobtracker 首先会尝试为每个job分配一个本地任务或者机架本地任务,如果不存在本地任务就分配非本地任务(先分配map任务,然后分配reduce任务)。
Collection<JobInProgress> jobQueue =
jobQueueJobInProgressListener.getJobQueue();// Fetch the job queue.
synchronized (jobQueue) {
for (JobInProgress job : jobQueue) {
if (job.getStatus().getRunState() == JobStatus.RUNNING) {
// Outstanding maps = desired minus already finished.
remainingMapLoad += (job.desiredMaps() - job.finishedMaps());
if (job.scheduleReduces()) {
remainingReduceLoad +=
(job.desiredReduces() - job.finishedReduces());
}
}
}
}// Compute the number of map and reduce tasks still outstanding across all running jobs.
for (int i=0; i < availableMapSlots; ++i) {
synchronized (jobQueue) {
for (JobInProgress job : jobQueue) {
if (job.getStatus().getRunState() != JobStatus.RUNNING) {
continue;
}
Task t = null;
// Try to schedule a node-local or rack-local Map task
t =
job.obtainNewNodeOrRackLocalMapTask(taskTrackerStatus,
numTaskTrackers, taskTrackerManager.getNumberOfUniqueHosts());
if (t != null) {
assignedTasks.add(t);
++numLocalMaps;
// Don't assign map tasks to the hilt!
// Leave some free slots in the cluster for future task-failures,
// speculative tasks etc. beyond the highest priority job
if (exceededMapPadding) {
break scheduleMaps;
}
// Try all jobs again for the next Map task
break;
}
// No local task available: try to schedule a non-local Map task
t =
job.obtainNewNonLocalMapTask(taskTrackerStatus, numTaskTrackers,
taskTrackerManager.getNumberOfUniqueHosts());
if (t != null) {
assignedTasks.add(t);
++numNonLocalMaps;
// We assign at most 1 off-switch or speculative task
// This is to prevent TaskTrackers from stealing local-tasks
// from other TaskTrackers.
break scheduleMaps;
}
}
}
}// Map task assignment: prefer node-local/rack-local tasks; fall back to non-local ones.
boolean exceededReducePadding = false;
if (availableReduceSlots > 0) {
exceededReducePadding = exceededPadding(false, clusterStatus,
trackerReduceCapacity);
synchronized (jobQueue) {
for (JobInProgress job : jobQueue) {
// Skip jobs that are not running or have no reduce tasks at all.
if (job.getStatus().getRunState() != JobStatus.RUNNING ||
job.numReduceTasks == 0) {
continue;
}
Task t =
job.obtainNewReduceTask(taskTrackerStatus, numTaskTrackers,
taskTrackerManager.getNumberOfUniqueHosts()
);
if (t != null) {
assignedTasks.add(t);
break;
}
// Don't assign reduce tasks to the hilt!
// Leave some free slots in the cluster for future task-failures,
// speculative tasks etc. beyond the highest priority job
if (exceededReducePadding) {
break;
}
}
}
}// Reduce task assignment; reduce tasks do not need to consider data locality.