Commit b1fe0b21 authored by Hector Oliveros, committed by GitHub

Update connection limit logging to check for changes (#1777)

* Update connection limit logging to check for changes

Added conditionals to log limit updates only when changes are detected, reducing unnecessary log noise. This applies to both server and client limit updates in the connection logic.

* Fix indentation in server and client limit updates

Adjusted indentation in the `update_limits` methods to ensure values are updated only when changes are detected. This aligns the logic with the condition and avoids unnecessary updates.

* Fix ruff format

---------
Co-authored-by: Hector Oliveros <holiveros.see@aminerals.cl>
parent c195f37c
......@@ -796,7 +796,7 @@ class Client:
and new_keepalive_count != params.RequestedMaxKeepAliveCount
):
_logger.info(
"KeepAliveCount will be updated to %s " "for consistency with RevisedPublishInterval",
"KeepAliveCount will be updated to %s for consistency with RevisedPublishInterval",
new_keepalive_count,
)
modified_params = ua.ModifySubscriptionParameters()
......
......@@ -219,7 +219,7 @@ class Reconciliator:
# in case the previous create_subscription request failed
if not real_sub:
_logger.warning(
"Can't create nodes for %s since underlying " "subscription for %s doesn't exist", url, sub_name
"Can't create nodes for %s since underlying subscription for %s doesn't exist", url, sub_name
)
continue
vs_real = real_map[url][sub_name]
......
......@@ -54,11 +54,18 @@ class TransportLimits:
ack.SendBufferSize = min(msg.SendBufferSize, self.max_recv_buffer)
ack.MaxChunkCount = self._select_limit(msg.MaxChunkCount, self.max_chunk_count)
ack.MaxMessageSize = self._select_limit(msg.MaxMessageSize, self.max_message_size)
self.max_chunk_count = ack.MaxChunkCount
self.max_recv_buffer = ack.SendBufferSize
self.max_send_buffer = ack.ReceiveBufferSize
self.max_message_size = ack.MaxMessageSize
_logger.info("updating server limits to: %s", self)
have_changes = (
self.max_chunk_count != ack.MaxChunkCount
or self.max_recv_buffer != ack.ReceiveBufferSize
or self.max_send_buffer != ack.SendBufferSize
or self.max_message_size != ack.MaxMessageSize
)
if have_changes:
_logger.info("updating server limits to: %s", self)
self.max_chunk_count = ack.MaxChunkCount
self.max_recv_buffer = ack.SendBufferSize
self.max_send_buffer = ack.ReceiveBufferSize
self.max_message_size = ack.MaxMessageSize
return ack
def create_hello_limits(self, msg: ua.Hello) -> ua.Hello:
......@@ -69,11 +76,18 @@ class TransportLimits:
return msg
def update_client_limits(self, msg: ua.Acknowledge) -> None:
self.max_chunk_count = msg.MaxChunkCount
self.max_recv_buffer = msg.ReceiveBufferSize
self.max_send_buffer = msg.SendBufferSize
self.max_message_size = msg.MaxMessageSize
_logger.info("updating client limits to: %s", self)
have_changes = (
self.max_chunk_count != msg.MaxChunkCount
or self.max_recv_buffer != msg.ReceiveBufferSize
or self.max_send_buffer != msg.SendBufferSize
or self.max_message_size != msg.MaxMessageSize
)
if have_changes:
_logger.info("updating client limits to: %s", self)
self.max_chunk_count = msg.MaxChunkCount
self.max_recv_buffer = msg.ReceiveBufferSize
self.max_send_buffer = msg.SendBufferSize
self.max_message_size = msg.MaxMessageSize
class MessageChunk:
......@@ -370,8 +384,7 @@ class SecureConnection:
)
if timeout < datetime.now(timezone.utc):
raise ua.UaError(
f"Security token id {security_hdr.TokenId} has timed out "
f"({timeout} < {datetime.now(timezone.utc)})"
f"Security token id {security_hdr.TokenId} has timed out ({timeout} < {datetime.now(timezone.utc)})"
)
return
......@@ -386,7 +399,7 @@ class SecureConnection:
if chunk.MessageHeader.MessageType != ua.MessageType.SecureOpen:
if chunk.MessageHeader.ChannelId != self.security_token.ChannelId:
raise ua.UaError(
f"Wrong channel id {chunk.MessageHeader.ChannelId}," f" expected {self.security_token.ChannelId}"
f"Wrong channel id {chunk.MessageHeader.ChannelId}, expected {self.security_token.ChannelId}"
)
if self._incoming_parts:
if self._incoming_parts[0].SequenceHeader.RequestId != chunk.SequenceHeader.RequestId:
......
......@@ -118,7 +118,7 @@ async def _read_and_copy_attrs(node_type: asyncua.Node, struct: Any, addnode: ua
setattr(struct, name, variant.Value)
else:
_logger.warning(
"Instantiate: while copying attributes from node type %s," " attribute %s, statuscode is %s",
"Instantiate: while copying attributes from node type %s, attribute %s, statuscode is %s",
str(node_type),
str(name),
str(results[idx].StatusCode),
......
......@@ -43,7 +43,7 @@ class Buffer:
self._size = size
def __str__(self):
return f"Buffer(size:{self._size}, data:{self._data[self._cur_pos:self._cur_pos + self._size]})"
return f"Buffer(size:{self._size}, data:{self._data[self._cur_pos : self._cur_pos + self._size]})"
__repr__ = __str__
......
......@@ -669,7 +669,7 @@ class XmlImporter:
is_struct = True
else:
_logger.warning(
"%s has datatypedefinition and path %s" " but we could not find out if this is a struct",
"%s has datatypedefinition and path %s but we could not find out if this is a struct",
obj,
path,
)
......
......@@ -112,7 +112,7 @@ class HistorySQLite(HistoryStorageInterface):
try:
validate_table_name(table)
async with self._db.execute(
f'SELECT * FROM "{table}" WHERE "SourceTimestamp" BETWEEN ? AND ? ' f'ORDER BY "_Id" {order} LIMIT ?',
f'SELECT * FROM "{table}" WHERE "SourceTimestamp" BETWEEN ? AND ? ORDER BY "_Id" {order} LIMIT ?',
(
start_time,
end_time,
......@@ -294,7 +294,7 @@ class HistorySQLite(HistoryStorageInterface):
s_clauses.append(name)
except AttributeError:
self.logger.warning(
"Historizing SQL OPC UA Select Clause Warning for node %s," " Clause: %s:", source_id, select_clause
"Historizing SQL OPC UA Select Clause Warning for node %s, Clause: %s:", source_id, select_clause
)
# remove select clauses that the event type doesn't have; SQL will error because the column doesn't exist
clauses = [x for x in s_clauses if x in self._event_fields[source_id]]
......
......@@ -59,8 +59,7 @@ class InternalSession(AbstractSession):
def __str__(self):
return (
f"InternalSession(name:{self.name},"
f" user:{self.user}, id:{self.session_id}, auth_token:{self.auth_token})"
f"InternalSession(name:{self.name}, user:{self.user}, id:{self.session_id}, auth_token:{self.auth_token})"
)
async def get_endpoints(self, params=None, sockname=None):
......
......@@ -39,7 +39,7 @@ class CertificateUserManager:
if name in self._trusted_certificates:
logging.warning(
"certificate with name %s " "attempted to be added multiple times, only the last version will be kept.",
"certificate with name %s attempted to be added multiple times, only the last version will be kept.",
name,
)
self._trusted_certificates[name] = {"certificate": uacrypto.der_from_x509(certificate), "user": user}
......
......@@ -786,7 +786,7 @@ class LocalizedText:
if self.Text is not None:
if not isinstance(self.Text, str):
raise ValueError(
f'A LocalizedText object takes a string as argument "text"' f"not a {type(self.Text)}, {self.Text}"
f'A LocalizedText object takes a string as argument "text"not a {type(self.Text)}, {self.Text}'
)
if self.Locale is not None:
......
......@@ -503,7 +503,7 @@ async def test_security_level_endpoints(srv_crypto_all_certs: Tuple[Server, str]
policy_type = ua.SecurityPolicyType.NoSecurity
else:
policy_type = ua.SecurityPolicyType[
f'{end_point.SecurityPolicyUri.split("#")[1].replace("_", "")}_{end_point.SecurityMode.name}'
f"{end_point.SecurityPolicyUri.split('#')[1].replace('_', '')}_{end_point.SecurityMode.name}"
]
assert end_point.SecurityLevel == SECURITY_POLICY_TYPE_MAP[policy_type][2]
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment