This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit f75942e

Enhance logging.
This change configures the logger to include the source line number in each log line and sets the root logger to `ERROR` in order to silence chatty libraries. This does not affect application logs, since their level is overridden a couple of lines later with the desired log level from config. Additionally, there is a Copilot-specific change that works together with the logged line numbers to make bugs easier to track down. There is no fundamental change in business logic.
1 parent 7e4b963 commit f75942e

File tree

src/codegate/codegate_logging.py
src/codegate/providers/copilot/pipeline.py

2 files changed: +26 −13 lines changed

src/codegate/codegate_logging.py

Lines changed: 1 addition & 0 deletions
@@ -88,6 +88,7 @@ def setup_logging(
             [
                 structlog.processors.CallsiteParameter.MODULE,
                 structlog.processors.CallsiteParameter.PATHNAME,
+                structlog.processors.CallsiteParameter.LINENO,
             ]
         ),
     ]
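
For context, here is a minimal, self-contained sketch of how the pieces described in the commit message fit together: the root logger is set to ERROR to silence chatty libraries, application logs are filtered at their own level, and the newly added CallsiteParameter.LINENO puts the source line number in every log line. Only the CallsiteParameterAdder call mirrors the diff above; the rest of the configuration (processor list, renderer, level plumbing) is illustrative and not the repository's actual setup_logging body.

import logging

import structlog


def setup_logging(app_log_level: int = logging.INFO) -> None:
    # Root logger at ERROR silences chatty third-party libraries.
    logging.basicConfig(level=logging.ERROR)

    structlog.configure(
        processors=[
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.CallsiteParameterAdder(
                [
                    structlog.processors.CallsiteParameter.MODULE,
                    structlog.processors.CallsiteParameter.PATHNAME,
                    structlog.processors.CallsiteParameter.LINENO,  # added by this commit
                ]
            ),
            structlog.dev.ConsoleRenderer(),
        ],
        # Application logs keep their own level (in codegate it comes from
        # config), so the root logger's ERROR setting does not affect them.
        wrapper_class=structlog.make_filtering_bound_logger(app_log_level),
    )


setup_logging()
logger = structlog.get_logger()
logger.info("hello")  # the rendered line now carries module, pathname and lineno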

src/codegate/providers/copilot/pipeline.py

Lines changed: 25 additions & 13 deletions
@@ -93,15 +93,19 @@ async def process_body(
         """Common processing logic for all strategies"""
         try:
             normalized_body = self.normalizer.normalize(body)
+        except Exception as e:
+            logger.error(f"Pipeline processing error: {e}")
+            return body, None

-            headers_dict = {}
-            for header in headers:
-                try:
-                    name, value = header.split(":", 1)
-                    headers_dict[name.strip().lower()] = value.strip()
-                except ValueError:
-                    continue
+        headers_dict = {}
+        for header in headers:
+            try:
+                name, value = header.split(":", 1)
+                headers_dict[name.strip().lower()] = value.strip()
+            except ValueError:
+                continue

+        try:
             result = await self.instance.process_request(
                 request=normalized_body,
                 provider=self.provider_name,
@@ -111,25 +115,33 @@ async def process_body(
                 extra_headers=CopilotPipeline._get_copilot_headers(headers_dict),
                 is_copilot=True,
             )
+        except Exception as e:
+            logger.error(f"Pipeline processing error: {e}")
+            return body, None

-            if result.context.shortcut_response:
+        if result.context.shortcut_response:
+            try:
                 # Return shortcut response to the user
                 body = CopilotPipeline._create_shortcut_response(
                     result, normalized_body.get("model", "gpt-4o-mini")
                 )
                 logger.info(f"Pipeline created shortcut response: {body}")
+            except Exception as e:
+                logger.error(f"Pipeline processing error: {e}")
+                return body, None

-            elif result.request:
+        elif result.request:
+            try:
                 # the pipeline did modify the request, return to the user
                 # in the original LLM format
                 body = self.normalizer.denormalize(result.request)
                 # Uncomment the below to debug the request
                 # logger.debug(f"Pipeline processed request: {body}")

-            return body, result.context
-        except Exception as e:
-            logger.error(f"Pipeline processing error: {e}")
-            return body, None
+                return body, result.context
+            except Exception as e:
+                logger.error(f"Pipeline processing error: {e}")
+                return body, None


 class CopilotFimNormalizer:
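
The net effect of the pipeline.py change is that each stage of process_body now has its own try/except, so a failure in any single stage is logged (with the line number added by the logging change above) and the original, unmodified body is returned, instead of one outer except catching everything. A minimal, hypothetical sketch of that pattern follows; the stage functions are stand-ins, not codegate's real normalizer or pipeline API.

import asyncio
import json

import structlog

logger = structlog.get_logger()


# Hypothetical stand-ins for the pipeline stages; the real normalizer and
# pipeline instance in codegate are considerably more involved.
def normalize(body: bytes) -> dict:
    return json.loads(body)


async def run_pipeline(request: dict) -> dict:
    return request


def denormalize(request: dict) -> bytes:
    return json.dumps(request).encode()


async def process_body(body: bytes) -> tuple[bytes, dict | None]:
    # Stage 1: normalize. A parse failure is logged with its own line number
    # and the raw body is passed through untouched.
    try:
        normalized = normalize(body)
    except Exception as e:
        logger.error(f"Pipeline processing error: {e}")
        return body, None

    # Stage 2: run the pipeline on the normalized request.
    try:
        result = await run_pipeline(normalized)
    except Exception as e:
        logger.error(f"Pipeline processing error: {e}")
        return body, None

    # Stage 3: convert back to the provider's wire format.
    try:
        body = denormalize(result)
    except Exception as e:
        logger.error(f"Pipeline processing error: {e}")
        return body, None

    return body, result


print(asyncio.run(process_body(b'{"model": "gpt-4o-mini"}')))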
