From 230c2fbc265657a56f5c95294efc6cdc8270024f Mon Sep 17 00:00:00 2001
From: Michelangelo Mori <328978+blkt@users.noreply.github.com>
Date: Thu, 16 Jan 2025 12:37:49 +0100
Subject: [PATCH] Enhance logging.

This change adds line numbers to log messages and complements
Copilot-specific changes that make exception handling in parts of the
Copilot pipeline more localized, making it easier to track down issues
with the proxy.

There's no fundamental change in business logic.
---
 src/codegate/codegate_logging.py           |  1 +
 src/codegate/providers/copilot/pipeline.py | 38 ++++++++++++++--------
 2 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/src/codegate/codegate_logging.py b/src/codegate/codegate_logging.py
index cb53ccc2..36d0d351 100644
--- a/src/codegate/codegate_logging.py
+++ b/src/codegate/codegate_logging.py
@@ -88,6 +88,7 @@ def setup_logging(
             [
                 structlog.processors.CallsiteParameter.MODULE,
                 structlog.processors.CallsiteParameter.PATHNAME,
+                structlog.processors.CallsiteParameter.LINENO,
             ]
         ),
     ]
diff --git a/src/codegate/providers/copilot/pipeline.py b/src/codegate/providers/copilot/pipeline.py
index d1ef13da..ddf3184d 100644
--- a/src/codegate/providers/copilot/pipeline.py
+++ b/src/codegate/providers/copilot/pipeline.py
@@ -93,15 +93,19 @@ async def process_body(
         """Common processing logic for all strategies"""
         try:
             normalized_body = self.normalizer.normalize(body)
+        except Exception as e:
+            logger.error(f"Pipeline processing error: {e}")
+            return body, None

-            headers_dict = {}
-            for header in headers:
-                try:
-                    name, value = header.split(":", 1)
-                    headers_dict[name.strip().lower()] = value.strip()
-                except ValueError:
-                    continue
+        headers_dict = {}
+        for header in headers:
+            try:
+                name, value = header.split(":", 1)
+                headers_dict[name.strip().lower()] = value.strip()
+            except ValueError:
+                continue

+        try:
             result = await self.instance.process_request(
                 request=normalized_body,
                 provider=self.provider_name,
@@ -111,25 +115,33 @@
                 extra_headers=CopilotPipeline._get_copilot_headers(headers_dict),
                 is_copilot=True,
             )
+        except Exception as e:
+            logger.error(f"Pipeline processing error: {e}")
+            return body, None

-            if result.context.shortcut_response:
+        if result.context.shortcut_response:
+            try:
                 # Return shortcut response to the user
                 body = CopilotPipeline._create_shortcut_response(
                     result, normalized_body.get("model", "gpt-4o-mini")
                 )
                 logger.info(f"Pipeline created shortcut response: {body}")
+            except Exception as e:
+                logger.error(f"Pipeline processing error: {e}")
+                return body, None

-            elif result.request:
+        elif result.request:
+            try:
                 # the pipeline did modify the request, return to the user
                 # in the original LLM format
                 body = self.normalizer.denormalize(result.request)
                 # Uncomment the below to debug the request
                 # logger.debug(f"Pipeline processed request: {body}")

-            return body, result.context
-        except Exception as e:
-            logger.error(f"Pipeline processing error: {e}")
-            return body, None
+                return body, result.context
+            except Exception as e:
+                logger.error(f"Pipeline processing error: {e}")
+                return body, None


 class CopilotFimNormalizer:
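
For context, a minimal, self-contained sketch of the logging behavior the first hunk enables. It is not part of the patch: only the three CallsiteParameter values mirror the list touched in codegate_logging.py; the surrounding structlog configuration and the sample log call are assumptions for illustration.

# Minimal sketch (assumed configuration, not from the patch): with
# CallsiteParameter.LINENO added to the CallsiteParameterAdder, every
# logged event carries the caller's module, pathname, and line number.
import structlog

structlog.configure(
    processors=[
        structlog.processors.CallsiteParameterAdder(
            [
                structlog.processors.CallsiteParameter.MODULE,
                structlog.processors.CallsiteParameter.PATHNAME,
                structlog.processors.CallsiteParameter.LINENO,
            ]
        ),
        structlog.processors.JSONRenderer(),
    ]
)

logger = structlog.get_logger()
logger.error("Pipeline processing error: example failure")
# Example output (values depend on the calling file):
# {"event": "Pipeline processing error: example failure",
#  "module": "example", "pathname": "/path/to/example.py", "lineno": 21}

With the line number attached to each event, the repeated "Pipeline processing error" messages introduced by the pipeline.py hunks become distinguishable, which is what makes the more localized exception handling useful for tracking down proxy issues.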