mirror of
https://github.com/srbhr/Resume-Matcher.git
synced 2026-01-20 23:42:15 +00:00
Add GET API endpoint for retrieving resume data
This commit is contained in:
@@ -169,3 +169,66 @@ async def score_and_improve(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="sorry, something went wrong!",
|
||||
)
|
||||
|
||||
|
||||
@resume_router.get(
    "",
    summary="Get resume data from both resume and processed_resume models",
)
async def get_resume(
    request: Request,
    resume_id: str = Query(..., description="Resume ID to fetch data for"),
    db: AsyncSession = Depends(get_db_session),
):
    """
    Retrieves resume data from both resume_model and processed_resume model by resume_id.

    Args:
        request: Incoming request; used to propagate the X-Request-ID header.
        resume_id: The ID of the resume to retrieve.
        db: Injected async database session.

    Returns:
        JSONResponse with the request id and the combined data from both the
        resume and processed_resume models.

    Raises:
        HTTPException: 400 if resume_id is empty, 404 if the resume is not
            found, 500 on any unexpected error.
    """
    request_id = getattr(request.state, "request_id", str(uuid4()))
    headers = {"X-Request-ID": request_id}

    # Validate BEFORE entering the try block: in the original code this
    # HTTPException(400) was raised inside the try and immediately caught by
    # the generic `except Exception` below, which converted the intended
    # 400 response into a misleading 500.
    if not resume_id:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="resume_id is required",
        )

    try:
        resume_service = ResumeService(db)
        resume_data = await resume_service.get_resume_with_processed_data(
            resume_id=resume_id
        )

        if not resume_data:
            # NOTE(review): the service raises ResumeNotFoundError(resume_id=...)
            # while this call uses message=... — confirm the exception accepts
            # both keyword arguments.
            raise ResumeNotFoundError(
                message=f"Resume with id {resume_id} not found"
            )

        return JSONResponse(
            content={
                "request_id": request_id,
                "data": resume_data,
            },
            headers=headers,
        )

    except ResumeNotFoundError as e:
        logger.error(str(e))
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=str(e),
        )
    except HTTPException:
        # Re-raise framework HTTP errors untouched so they are not masked
        # as a 500 by the handler below.
        raise
    except Exception as e:
        logger.error(f"Error fetching resume: {str(e)} - traceback: {traceback.format_exc()}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Error fetching resume data",
        )
|
||||
@@ -6,13 +6,16 @@ import logging
|
||||
|
||||
from markitdown import MarkItDown
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.future import select
|
||||
from pydantic import ValidationError
|
||||
from typing import Dict, Optional
|
||||
|
||||
from app.models import Resume, ProcessedResume
|
||||
from app.agent import AgentManager
|
||||
from app.prompt import prompt_factory
|
||||
from app.schemas.json import json_schema_factory
|
||||
from app.schemas.pydantic import StructuredResumeModel
|
||||
from .exceptions import ResumeNotFoundError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -157,3 +160,57 @@ class ResumeService:
|
||||
logger.info(f"Validation error: {e}")
|
||||
return None
|
||||
return structured_resume.model_dump()
|
||||
|
||||
async def get_resume_with_processed_data(self, resume_id: str) -> Optional[Dict]:
    """
    Fetches both resume and processed resume data from the database and combines them.

    Args:
        resume_id: The ID of the resume to retrieve.

    Returns:
        Combined data from both the resume and processed_resume models; the
        "processed_resume" key is None when no processed record exists yet.

    Raises:
        ResumeNotFoundError: If no resume row matches resume_id.
    """
    # Fetch the raw resume row; its absence is an error.
    resume_query = select(Resume).where(Resume.resume_id == resume_id)
    resume_result = await self.db.execute(resume_query)
    resume = resume_result.scalars().first()

    if not resume:
        raise ResumeNotFoundError(resume_id=resume_id)

    # Fetch the processed resume row; its absence is tolerated (still
    # processing, or processing failed).
    processed_query = select(ProcessedResume).where(
        ProcessedResume.resume_id == resume_id
    )
    processed_result = await self.db.execute(processed_query)
    processed_resume = processed_result.scalars().first()

    combined_data = {
        "resume_id": resume.resume_id,
        "raw_resume": {
            "id": resume.id,
            "content": resume.content,
            "content_type": resume.content_type,
            "created_at": resume.created_at.isoformat() if resume.created_at else None,
        },
        "processed_resume": None,
    }

    if processed_resume:
        # The processed columns hold JSON text (assumes TEXT columns — if the
        # schema ever switches to native JSON columns, json.loads must go).
        # A single field list replaces eight copy-pasted
        # `json.loads(x) if x else None` expressions, so adding a field is a
        # one-line change.
        json_fields = (
            "personal_data",
            "experiences",
            "projects",
            "skills",
            "research_work",
            "achievements",
            "education",
            "extracted_keywords",
        )
        parsed = {
            field: json.loads(raw) if (raw := getattr(processed_resume, field)) else None
            for field in json_fields
        }
        parsed["processed_at"] = (
            processed_resume.processed_at.isoformat()
            if processed_resume.processed_at
            else None
        )
        combined_data["processed_resume"] = parsed

    return combined_data
|
||||
|
||||
Reference in New Issue
Block a user