I don't think this would help, but here is the code:
import openai
from googleapiclient.errors import HttpError


def edit_google_doc(service, course_id, coursework_id, student_id):
    processed_assignments = load_processed_assignments()
    if is_assignment_processed(course_id, coursework_id, processed_assignments):
        print(f"Assignment {coursework_id} for course {course_id} has already been processed.")
        return  # Skip processing if already done

    try:
        # Retrieve this student's submissions for the assignment
        submissions = service.courses().courseWork().studentSubmissions().list(
            courseId=course_id,
            courseWorkId=coursework_id,
            userId=student_id
        ).execute().get('studentSubmissions', [])

        for submission in submissions:
            # Check for Drive-file attachments on the submission
            attachments = submission.get('assignmentSubmission', {}).get('attachments', [])
            for attachment in attachments:
                if 'driveFile' in attachment:
                    drive_file = attachment['driveFile']
                    doc_id = drive_file['id']  # Get the document ID

                    # Retrieve the content of the document
                    doc_content = retrieve_document_content(service, doc_id)

                    # Define prompts
                    prompt1 = "List my spelling, grammar, and punctuation mistakes number-wise."
                    prompt2 = ""

                    # Call the edit_document function with the document content and prompts
                    edit_document(service, doc_id, doc_content, prompt1, prompt2)

                    # Mark the assignment as processed if the above steps succeeded
                    mark_assignment_processed(course_id, coursework_id, processed_assignments)
    except Exception as e:
        print(f"Error retrieving submissions or attachments: {e}")
def get_chat_model_response(prompt):
    try:
        response = openai.ChatCompletion.create(
            engine="gpt-4-32k",  # Chat model / deployment to use
            messages=[{"role": "system", "content": "You are a helpful assistant."},
                      {"role": "user", "content": prompt}],  # Pass the prompt in chat-message format
            max_tokens=4000  # Cap the response length
        )
        return response['choices'][0]['message']['content'].strip()  # Extract the response text
    except Exception as e:
        print(f"Error getting response from gpt-4-32k: {e}")
        return "Default content due to error."  # Fallback content
# edit_document inserts two model-generated responses into the Google Doc
def edit_document(service, doc_id, doc_content, prompt1, prompt2):
    docs_service = get_docs_service()  # Get the Docs service
    try:
        # Combine the document content with the first prompt
        full_prompt1 = f"{prompt1}\n\nDocument Content:\n{doc_content}"
        new_content1 = get_chat_model_response(full_prompt1)  # First model response

        # Fetch the document to get its current structure
        document = docs_service.documents().get(documentId=doc_id).execute()
        content = document.get('body').get('content', [])

        # Determine the insertion index. Docs indices are character-based, so
        # use the endIndex of the last structural element (minus 1 to stay
        # before the final newline) rather than the element count.
        insert_index1 = content[-1]['endIndex'] - 1 if content else 1

        # Prepare and execute the request to insert the first response
        requests = [
            {
                'insertText': {
                    'location': {
                        'index': insert_index1,
                    },
                    'text': f'\n{new_content1}\n'  # Add a newline before and after
                }
            }
        ]
        docs_service.documents().batchUpdate(documentId=doc_id, body={'requests': requests}).execute()
        print(f"Document {doc_id} updated successfully with first content.")

        # Combine the document content with the second prompt
        full_prompt2 = f"{prompt2}\n\nDocument Content:\n{doc_content}"
        new_content2 = get_chat_model_response(full_prompt2)  # Second model response

        # Calculate the new insertion point (just after the first inserted text)
        insert_index2 = insert_index1 + len(new_content1) + 2  # +2 for the surrounding newlines

        # Prepare and execute the request to insert the second response
        requests = [
            {
                'insertText': {
                    'location': {
                        'index': insert_index2,
                    },
                    'text': f'\n{new_content2}\n'  # Add a newline before and after
                }
            }
        ]
        docs_service.documents().batchUpdate(documentId=doc_id, body={'requests': requests}).execute()
        print(f"Document {doc_id} updated successfully with second content.")
    except HttpError as error:
        print(f"An error occurred while updating document {doc_id}: {error}")