MT#33006 repoapi: upgrade structlog config (3.0+)

https://django-structlog.readthedocs.io/en/latest/upgrade_guide.html
Change-Id: I76b4f26aca2a763fb106bfe6c7c85ea3b6c06d16
Victor Seva 3 years ago
parent dc4d537f3d
commit 951a274c51
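
The pattern behind every hunk below: structlog 3.0 deprecates the thread-local context (structlog.threadlocal.wrap_dict) in favor of contextvars-based context, so the per-call "log = logger.bind(...)" chains give way to structlog.contextvars.bind_contextvars(...) plus plain module-level logger calls. A minimal before/after sketch (illustrative values, not project code):

import structlog

logger = structlog.get_logger(__name__)

# before: bind() returns a new BoundLogger that carries the context locally
log = logger.bind(release="mr10.1.1")
log.info("build started")

# after: context lives in a contextvars store; the merge_contextvars
# processor (added to the config further down) injects it into every event
structlog.contextvars.bind_contextvars(release="mr10.1.1")
logger.info("build started")
structlog.contextvars.clear_contextvars()  # reset when the unit of work ends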

@@ -50,7 +50,6 @@ class BuildReleaseManager(models.Manager):
         )

     def create_build_release(self, uuid, release, fake=False):
-        log = logger.bind(uuid=str(uuid), release=release, fake=fake)
         config = ReleaseConfig(release)
         qs = self.get_queryset()
         br = qs.filter(
@@ -59,18 +58,17 @@ class BuildReleaseManager(models.Manager):
         release_ok = config.release
         if br.exists():
             if regex_mrXXX.match(config.branch):
-                msg = "release[mrX.Y.Z]:{} has already a build"
-                raise BuildReleaseUnique(msg.format(release))
+                msg = f"release[mrX.Y.Z]:{release} has already a build"
+                raise BuildReleaseUnique(msg)
             elif regex_mrXX.match(config.branch):
-                release_ok = "{}-update".format(config.release)
+                release_ok = f"{config.release}-update"
                 msg = (
-                    "release[mrX.Y]:{} has already a build, "
-                    "set {} as release"
+                    f"release[mrX.Y]:{config.branch} has already a build, "
+                    f"set {release_ok} as release"
                 )
-                log.info(msg.format(config.branch, release_ok))
+                logger.info(msg)
             if not br.last().done:
-                msg = f"release:{release} is already building"
-                log.info(msg)
+                logger.info(f"release:{release} is already building")
                 raise PreviousBuildNotDone(msg)
         projects = ",".join(config.projects)
         if fake:
@@ -316,7 +314,7 @@ class BuildRelease(models.Model):
         return "branch/{}".format(self.branch)

     def _next(self, exclude=[]):
-        log = logger.bind(release=self)
+        structlog.contextvars.bind_contextvars(release=f"{self}")
         if self.built_projects is None:
             return self.build_deps[0][0]
         if self.done:
@@ -333,7 +331,7 @@ class BuildRelease(models.Model):
                     deps_missing.append(prj)
             else:
                 if len(deps_missing) > 0:
-                    log.info(
+                    logger.info(
                         "release has build_deps missing",
                         deps_missing=deps_missing,
                     )
@@ -347,9 +345,9 @@ class BuildRelease(models.Model):
     @property
     def next(self):
         failed_projects = self.failed_projects_list
-        log = logger.bind(release=self)
+        structlog.contextvars.bind_contextvars(release=f"{self}")
         if any(job in failed_projects for job in settings.BUILD_RELEASE_JOBS):
-            log.info(
+            logger.info(
                 "release has failed release_jobs, stop sending jobs",
                 failed_projects=failed_projects,
             )
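
For reference, a self-contained sketch (assumed minimal setup, not the project's actual settings) of why _next and next can drop the local log variable: once merge_contextvars runs in the processor chain, anything bound via bind_contextvars appears on every later event in the same context.

import structlog

structlog.configure(
    processors=[
        structlog.contextvars.merge_contextvars,  # pulls bound context into each event
        structlog.processors.KeyValueRenderer(key_order=["event"]),
    ],
    logger_factory=structlog.PrintLoggerFactory(),
)

logger = structlog.get_logger()
structlog.contextvars.bind_contextvars(release="release-mr10.1")
logger.info("release has build_deps missing", deps_missing=["kamailio"])
# prints roughly:
# event='release has build_deps missing' release='release-mr10.1' deps_missing=['kamailio']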

@@ -35,16 +35,16 @@ def br_manage(sender, **kwargs):
     instance = kwargs["instance"]
     if instance.release.endswith("-update"):
         build_resume.delay(instance.pk)
-        logger.debug("BuildRelease:%s triggered", instance)
+        logger.debug(f"BuildRelease:{instance} triggered")
     elif timezone.now() > instance.start_date + timedelta(minutes=15):
-        logger.debug(
-            "BuildRelease:%s not triggered, is from the past:%s",
-            instance,
-            instance.start_date,
+        msg = (
+            f"BuildRelease:{instance} not triggered,"
+            f" is from the past:{instance.start_date}"
         )
+        logger.debug(msg)
     else:
         build_release.delay(instance.pk)
-        logger.debug("BuildRelease:%s triggered", instance)
+        logger.debug(f"BuildRelease:{instance} triggered")


 @receiver(
@@ -62,24 +62,24 @@ def jbi_manage(sender, **kwargs):
     if jbi.param_release_uuid is None:
         return
     release = jbi.param_release
-    log = logger.bind(
+    structlog.contextvars.bind_contextvars(
         release_uuid=jbi.param_release_uuid, release=jbi.param_release
     )
     if jbi.jobname in settings.BUILD_RELEASE_JOBS:
         if not release.startswith("release-"):
             release = "release-{}".format(jbi.param_release)
         if jbi.param_release_uuid in [None, "none", "", "$release_uuid"]:
-            log.debug("no ReleaseBuild link, skip")
+            logger.debug("no ReleaseBuild link, skip")
             return
         try:
             br = BuildRelease.objects.get(
                 uuid=jbi.param_release_uuid,
             )
         except BuildRelease.DoesNotExist:
-            log.error("BuildRelease not found")
+            logger.error("BuildRelease not found")
             return
         if not br.append_built(jbi):
-            log.debug("BuildRelease:%s jbi:%s skip", br, jbi)
+            logger.debug(f"BuildRelease:{br} jbi:{jbi} skip")
             return
         br.remove_triggered(jbi)
         build_resume.delay(br.id)

@@ -27,12 +27,11 @@ logger = structlog.get_logger(__name__)
 @shared_task(bind=True)
 def build_release(self, pk):
     BuildRelease = apps.get_model("build", "BuildRelease")
-    log = logger.bind(pk=pk)
     br = BuildRelease.objects
     try:
         instance = br.get(id=pk)
     except BuildRelease.DoesNotExist as exc:
-        log.warn("BuildRelease not found")
+        logger.warn("BuildRelease not found")
         raise self.retry(countdown=60 * 5, exc=exc)
     if instance.release == "trunk":
         release = "release-trunk-{}".format(instance.distribution)
@@ -41,7 +40,7 @@ def build_release(self, pk):
     url = trigger_copy_deps(
         release=release, internal=True, release_uuid=instance.uuid
     )
-    log.info(
+    logger.info(
         "BuildRelease copy_deps triggered", instance=str(instance), url=url
     )
@@ -49,11 +48,10 @@ def build_release(self, pk):
 @shared_task(ignore_result=True)
 def build_project(pk, project):
     BuildRelease = apps.get_model("build", "BuildRelease")
-    log = logger.bind(project=project, pk=pk)
     try:
         br = BuildRelease.objects.get(id=pk)
     except BuildRelease.DoesNotExist:
-        log.error("can't trigger project on unknown release")
+        logger.error("can't trigger project on unknown release")
         return
     url = trigger_build(
         "{}-get-code".format(project),
@@ -63,17 +61,16 @@ def build_project(pk, project):
         trigger_distribution=br.distribution,
     )
     br.pool_size += 1
-    log.info("project triggered", url=url, pool_size=br.pool_size)
+    logger.info("project triggered", url=url, pool_size=br.pool_size)


 @shared_task(ignore_result=True)
 def build_resume(pk):
     BuildRelease = apps.get_model("build", "BuildRelease")
-    log = logger.bind(pk=pk)
     try:
         br = BuildRelease.objects.get(id=pk)
     except BuildRelease.DoesNotExist:
-        log.error("can't resume on unknown release")
+        logger.error("can't resume on unknown release")
         return
     params = {
         "release_uuid": br.uuid,
@@ -82,25 +79,27 @@ def build_resume(pk):
         "trigger_distribution": br.distribution,
     }
     size = settings.BUILD_POOL - br.pool_size
-    log.bind(size=size, pool_size=br.pool_size, br=str(br))
+    structlog.contextvars.bind_contextvars(
+        size=size, pool_size=br.pool_size, br=str(br)
+    )
     if size <= 0:
-        log.info("No more room for new builds, wait for next slot")
+        logger.info("No more room for new builds, wait for next slot")
     for step in range(size):
         prj = br.next
         if prj:
             params["project"] = "{}-get-code".format(prj)
-            log.debug("trigger project", project=params["project"])
+            logger.debug("trigger project", project=params["project"])
             trigger_build(**params)
             br.append_triggered(prj)
         else:
-            log.debug("BuildRelease has no next")
+            logger.debug("BuildRelease has no next")
             if not br.done:
-                log.debug("not done yet")
+                logger.debug("not done yet")
                 continue
             if br.release == "release-trunk-weekly":
                 url = trigger_build_matrix(br)
                 if url is not None:
-                    log.info(
+                    logger.info(
                         "build_matrix triggered", instance=str(br), url=url
                     )
             break
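
One caveat the commit leaves open (an observation, not something the diff addresses): bind_contextvars in a Celery task body persists for the rest of that execution context, so long-lived workers can leak keys between jobs unless something clears them. structlog offers two escape hatches; a hedged sketch, assuming structlog >= 21.4 for the context-manager form:

import structlog

logger = structlog.get_logger(__name__)

def build_resume_sketch(pk):  # hypothetical task body, not the project's
    # scoped bind: the key disappears again when the block exits
    with structlog.contextvars.bound_contextvars(pk=pk):
        logger.debug("trigger project")
    # or token-based: bind_contextvars returns tokens usable for an exact undo
    tokens = structlog.contextvars.bind_contextvars(pk=pk)
    logger.debug("trigger project")
    structlog.contextvars.reset_contextvars(**tokens)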

@@ -96,7 +96,7 @@ def trigger_build_matrix(br):
         logger.info("{} already triggered, skip".format(params["job"]))
         return
     if settings.DEBUG:
-        logger.info("Debug mode, would trigger: %s", url)
+        logger.info(f"Debug mode, would trigger: {url}")
     else:
         open_jenkins_url(url)
     return "{base}/job/{job}/".format(**params)
@@ -121,7 +121,7 @@ def trigger_copy_deps(release, internal, release_uuid, uuid=None):
     }
     url = copy_deps_url.format(**params)
     if settings.DEBUG:
-        logger.info("Debug mode, would trigger: %s", url)
+        logger.info(f"Debug mode, would trigger: {url}")
     else:
         open_jenkins_url(url)
     return "{base}/job/{job}/".format(**params)
@@ -165,7 +165,7 @@ def trigger_build(
     url = project_url.format(**params)
     if settings.DEBUG:
-        logger.info("Debug mode, would trigger: %s", url)
+        logger.info(f"Debug mode, would trigger: {url}")
     else:
         open_jenkins_url(url)
     return "{base}/job/{job}/".format(**params)

@@ -22,7 +22,7 @@ logger = structlog.get_logger(__name__)
 def process_hotfix(jbi_info, projectname, path):
     model = NoteInfo.get_model()
-    logger.info("hotfix_released[%s] %s", jbi_info, path)
+    logger.info(f"hotfix_released[{jbi_info}] {path}")
     ids, changelog = parse_changelog(path, model)
     for wid in ids:
         model.create(wid, projectname, changelog.full_version)

@@ -57,7 +57,7 @@ def docker_fetch_project(projectname):
     project = Project.objects.get(name=projectname)
     for imagename in project.filter_docker_images(images):
         image = DockerImage.objects.create(name=imagename, project=project)
-        logger.debug("%s created" % image)
+        logger.debug(f"{image} created")
         docker_fetch_info.delay(image.name)
@@ -72,7 +72,7 @@ def docker_fetch_all():
     project, _ = Project.objects.get_or_create(name=projectname)
     for imagename in project.filter_docker_images(images):
         image = DockerImage.objects.create(name=imagename, project=project)
-        logger.debug("%s created" % image)
+        logger.debug(f"{image} created")
         docker_fetch_info.delay(image.name)

@@ -61,7 +61,7 @@ def trigger_hotfix(project, branch, user, push="yes", empty=False):
     url = hotfix_url.format(**params)
     if settings.DEBUG:
-        logger.warn("Debug mode, would trigger: %s", url)
+        logger.warn(f"Debug mode, would trigger: {url}")
     else:
         open_jenkins_url(url)
     res = [

@@ -48,7 +48,7 @@ def trigger_docker_build(project, branch):
     url = docker_url.format(**params)
     if settings.DEBUG:
-        logger.debug("Debug mode, would trigger: %s", url)
+        logger.debug(f"Debug mode, would trigger: {url}")
     else:
         open_jenkins_url(url)
     return "{base}/job/build-project-docker/".format(**params)
@@ -56,13 +56,13 @@ def trigger_docker_build(project, branch):
 def _get_info(url, headers=None):
     if settings.DEBUG:
-        logger.debug("Debug mode, would trigger: %s", url)
+        logger.debug(f"Debug mode, would trigger: {url}")
     else:
         if headers:
-            logger.debug("trigger: %s, headers: '%s'", url, headers)
+            logger.debug(f"trigger: {url}, headers: '{headers}'")
             response = requests.get(url, headers)
         else:
-            logger.debug("trigger: %s", url)
+            logger.debug(f"trigger: {url}")
             response = requests.get(url)
         logger.debug("response: %s" % response)
     response.raise_for_status()
@@ -84,11 +84,11 @@ def get_docker_manifests_info(url):
 def delete_docker_info(url):
     if settings.DEBUG:
-        logger.debug("Debug mode, would trigger: %s", url)
+        logger.debug(f"Debug mode, would trigger: {url}")
     else:
-        logger.debug("trigger: %s", url)
+        logger.debug(f"trigger:{url}")
         response = requests.delete(url)
-        logger.debug("response: %s" % response)
+        logger.debug(f"response: {response}")
         response.raise_for_status()
     return
@@ -101,7 +101,7 @@ def get_docker_repositories():
     url = settings.DOCKER_REGISTRY_URL.format("_catalog")
     try:
         info = get_docker_info(url)
-        logger.debug("response: %s" % info)
+        logger.debug(f"response: {info}")
         result = json.loads(info)
         return result["repositories"]
     except Exception as e:
@@ -116,14 +116,14 @@ def get_docker_tags(image):
         except KeyError:
             return []
     else:
-        url = settings.DOCKER_REGISTRY_URL.format("%s/tags/list" % image)
+        url = settings.DOCKER_REGISTRY_URL.format(f"{image}/tags/list")
         try:
             info = get_docker_info(url)
             logger.debug("response: %s" % info)
             result = json.loads(info)
             return result["tags"]
         except Exception as e:
-            logger.error("image: %s %s" % (image, e))
+            logger.error(f"image: {image} {e}")
             return []
@@ -132,34 +132,34 @@ def get_docker_manifests(image, tag):
         return ("{}", uuid.uuid4())
     else:
         dru = settings.DOCKER_REGISTRY_URL
-        url = dru.format("%s/manifests/%s" % (image, tag))
+        url = dru.format(f"{image}/manifests/{tag}")
         try:
             info, digest = get_docker_manifests_info(url)
-            logger.debug("response: %s" % info)
+            logger.debug(f"response: {info}")
             result = json.loads(info)
             return (result, digest)
         except Exception as e:
-            logger.error("image: %s tag:%s %s" % (image, tag, e))
+            logger.error(f"image: {image} tag:{tag} {e}")
             return (None, None)


 def delete_tag(image, reference, tag_name):
     try:
         dru = settings.DOCKER_REGISTRY_URL
-        url = dru.format("%s/manifests/%s" % (image, reference))
-        logger.debug("docker delete_tag(%s)" % (url))
+        url = dru.format(f"{image}/manifests/{reference}")
+        logger.debug(f"docker delete_tag({url})")
         delete_docker_info(url)
     except Exception:
         # it does not work for some, retrieve Docker-Content-Digest from
         # manifests and delete using that as reference
         dru = settings.DOCKER_REGISTRY_URL
-        url = dru.format("%s/manifests/%s" % (image, tag_name))
-        logger.debug("docker delete_tag() get_docker_manif_info(): %s" % (url))
+        url = dru.format(f"{image}/manifests/{tag_name}")
+        logger.debug(f"docker delete_tag() get_docker_manif_info(): {url}")
         response = get_docker_manifests_info(url)
         logger.debug(" - response text: %s" % (response[0]))
         logger.debug(" - response Docker-Content-Digest: %s" % (response[1]))
         dru = settings.DOCKER_REGISTRY_URL
         url = dru.format("%s/manifests/%s" % (image, response[1]))
-        logger.info("docker delete_tag() will delete: %s" % (url))
+        logger.info(f"docker delete_tag() will delete: {url}")
         delete_docker_info(url)

@@ -84,12 +84,12 @@ def build_release(request, release):
 @permission_required("build.can_trigger_hotfix", raise_exception=True)
 def hotfix_build(request, branch, project):
     if project not in settings.RELEASE_DASHBOARD_PROJECTS:
-        error = "repo:%s not valid" % project
+        error = f"repo:{project} not valid"
         logger.error(error)
         return HttpResponseNotFound(error)
     if not regex_hotfix.match(branch):
-        error = "branch:%s not valid. Not mrX.X.X format" % branch
+        error = f"branch:{branch} not valid. Not mrX.X.X format"
         logger.error(error)
         return HttpResponseNotFound(error)
     proj = Project.objects.get(name=project)
@@ -103,7 +103,7 @@ def hotfix_build(request, branch, project):
     push = json_data.get("push", "no")
     empty = json_data.get("empty", False)
     if push == "no":
-        logger.warn("dry-run for %s:%s", project, branch)
+        logger.warn(f"dry-run for {project}:{branch}")
     urls = build.trigger_hotfix(project, branch, request.user, push, empty)
     return JsonResponse({"urls": urls})
@@ -132,7 +132,7 @@ def hotfix_release_build(request, release, project):
         error = f"branch:{release_config.branch} not valid. Not mrX.X.X format"
         logger.error(error)
         return HttpResponseNotFound(error)
-    logger.debug(body=request.body)
+    structlog.contextvars.bind_contextvars(body=request.body)
    json_data = json.loads(request.body.decode("utf-8"))
     push = json_data.get("push", "no")
     empty = json_data.get("empty", False)
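
Worth noting for the last hunk: the replaced logger.debug(body=request.body) call passed no event string at all (structlog renders it with event=None), so binding the body into the request context is both the upgrade-guide pattern and a small fix. Sketch of the difference (the follow-up event name is hypothetical):

# before: keyword-only debug call, renders with event=None
logger.debug(body=request.body)
# after: body rides along on every subsequent event in this context
structlog.contextvars.bind_contextvars(body=request.body)
logger.debug("hotfix payload received")  # hypothetical later event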

@@ -44,19 +44,19 @@ logger = structlog.get_logger(__name__)
 def _get_docker_tags(project, tag=None):
     repos = docker.get_docker_repositories()
-    r = re.compile(".*%s.*" % project)
+    r = re.compile(f".*{project}.*")
     project_repos = filter(r.match, repos)
-    logger.debug("%s: %s" % (project, project_repos))
+    logger.debug(f"{project}: {project_repos}")
     docker_tags = []
     for image in project_repos:
         res = {"name": image}
         tags = docker.get_docker_tags(image)
         if tag:
-            logger.debug("non filtered tags: %s" % tags)
+            logger.debug(f"non filtered tags: {tags}")
             tags = filter(re.compile(tag).match, tags)
         res["tags"] = tags
         docker_tags.append(res)
-    logger.debug("docker_tags: %s" % docker_tags)
+    logger.debug(f"docker_tags: {docker_tags}")
     return docker_tags
@@ -73,8 +73,8 @@ def _build_docker_logic(form, projects):
             url = docker.trigger_docker_build(pro, result[pro])
             context["projects"].append({"name": pro, "url": url})
         except KeyError:
-            msg = "Houston, we have a problem with trigger for %s"
-            logger.error(msg, pro)
+            msg = f"Houston, we have a problem with trigger for {pro}"
+            logger.error(msg)
             context["projects"].append({"name": pro, "url": None})
     return context

@@ -72,6 +72,7 @@ def receiver_setup_logging(loglevel, logfile, format, colorize, **kwargs):
     structlog.configure(
         processors=[
+            structlog.contextvars.merge_contextvars,
             structlog.stdlib.filter_by_level,
             structlog.stdlib.add_logger_name,
             structlog.stdlib.add_log_level,
@@ -82,9 +83,7 @@ def receiver_setup_logging(loglevel, logfile, format, colorize, **kwargs):
             structlog.processors.ExceptionPrettyPrinter(),
             structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
         ],
-        context_class=structlog.threadlocal.wrap_dict(dict),
         logger_factory=structlog.stdlib.LoggerFactory(),
-        wrapper_class=structlog.stdlib.BoundLogger,
         cache_logger_on_first_use=True,
     )
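
Assembled from this hunk, the post-upgrade Celery-side configuration reads roughly as follows (a sketch; processors from the unchanged middle of the list are omitted):

import structlog

structlog.configure(
    processors=[
        structlog.contextvars.merge_contextvars,  # new: merge contextvars context
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.processors.ExceptionPrettyPrinter(),
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
    ],
    # dropped: context_class=structlog.threadlocal.wrap_dict(dict)  (deprecated)
    # dropped: wrapper_class=structlog.stdlib.BoundLogger  (per the upgrade guide)
    logger_factory=structlog.stdlib.LoggerFactory(),
    cache_logger_on_first_use=True,
)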

@@ -162,7 +162,9 @@ LOGGING = {
 structlog.configure(
     processors=[
+        structlog.contextvars.merge_contextvars,
         structlog.stdlib.filter_by_level,
+        structlog.processors.TimeStamper(fmt="iso"),
         structlog.stdlib.add_logger_name,
         structlog.stdlib.add_log_level,
         structlog.stdlib.PositionalArgumentsFormatter(),
@@ -172,9 +174,7 @@ structlog.configure(
         structlog.processors.ExceptionPrettyPrinter(),
         structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
     ],
-    context_class=structlog.threadlocal.wrap_dict(dict),
     logger_factory=structlog.stdlib.LoggerFactory(),
-    wrapper_class=structlog.stdlib.BoundLogger,
     cache_logger_on_first_use=True,
 )

@@ -40,11 +40,8 @@ def jbi_manage(sender, **kwargs):
 def gerrit_repo_add(instance):
-    log = logger.bind(
-        instance=str(instance),
-    )
     if instance.param_ppa == "$ppa":
-        log.warn("ppa unset, skip removal")
+        logger.warn("ppa unset, skip removal")
         return
     GerritRepoInfo = apps.get_model("repoapi", "GerritRepoInfo")
     gri = GerritRepoInfo.objects
@@ -54,19 +51,16 @@ def gerrit_repo_add(instance):
         defaults={"projectname": instance.projectname},
     )
     if created:
-        log.debug("ppa created", ppa=str(ppa))
+        logger.debug("ppa created", ppa=str(ppa))
     elif ppa.projectname == "unknown":
         ppa.projectname = instance.projectname
         ppa.save()
-        log.info("ppa projectname updated")
+        logger.info("ppa projectname updated")


 def gerrit_repo_del(instance):
-    log = logger.bind(
-        instance=str(instance),
-    )
     if instance.param_ppa == "$ppa":
-        log.warn("ppa unset, skip removal")
+        logger.warn("ppa unset, skip removal")
         return
     GerritRepoInfo = apps.get_model("repoapi", "GerritRepoInfo")
     gri = GerritRepoInfo.objects
@@ -75,7 +69,7 @@ def gerrit_repo_del(instance):
             param_ppa=instance.param_ppa, gerrit_change=instance.gerrit_change
         )
         ppa.delete()
-        log.debug("removed ppa", ppa=str(ppa))
+        logger.debug("removed ppa", ppa=str(ppa))
     except GerritRepoInfo.DoesNotExist:
         pass
     qs = gri.filter(param_ppa=instance.param_ppa)
@@ -84,10 +78,10 @@ def gerrit_repo_del(instance):
     if ppa_count == 0:
         utils.jenkins_remove_ppa(instance.param_ppa)
     elif project_ppa_count == 0:
-        log.debug("remove source+packages from ppa")
+        logger.debug("remove source+packages from ppa")
         jenkins_remove_project.delay(instance.id)
     else:
-        log.debug(
+        logger.debug(
             "nothing to do here",
             ppa_count=ppa_count,
             project_ppa_count=project_ppa_count,
@@ -102,12 +96,12 @@ def gerrit_repo_del(instance):
 def gerrit_repo_manage(sender, **kwargs):
     if kwargs["created"]:
         instance = kwargs["instance"]
-        log = logger.bind(
+        structlog.contextvars.bind_contextvars(
             instance=str(instance),
             ppa=instance.param_ppa,
         )
         if instance.param_ppa == "$ppa":
-            log.warn("ppa unset, skip")
+            logger.warn("ppa unset, skip")
             return
         if (
             instance.jobname.endswith("-repos")
@@ -123,14 +117,14 @@ def gerrit_repo_manage(sender, **kwargs):
             and instance.result == "SUCCESS"
             and instance.gerrit_eventtype == "change-abandoned"
         ):
-            log.debug("we need to count this")
+            logger.debug("we need to count this")
             gerrit_repo_del(instance)


 def tracker_release_target(instance, note: NoteInfo):
     if not is_ngcp_project(instance.projectname):
         logger.info(
-            "%s not a NGCP project, skip release_target", instance.projectname
+            f"{instance.projectname} not a NGCP project, skip release_target"
         )
         return
     branch = instance.param_branch

@@ -37,7 +37,7 @@ logger = structlog.get_logger(__name__)
 def jenkins_remove_project(self, jbi_id):
     JenkinsBuildInfo = apps.get_model("repoapi", "JenkinsBuildInfo")
     jbi = JenkinsBuildInfo.objects.get(id=jbi_id)
-    log = logger.bind(
+    structlog.contextvars.bind_contextvars(
         jbi=str(jbi),
     )
     if (
@@ -48,42 +48,33 @@ def jenkins_remove_project(self, jbi_id):
     try:
         jenkins_remove_project_ppa(jbi.param_ppa, jbi.source)
     except FileNotFoundError as exc:
-        log.warn("source is not there yet, try again in 60 secs")
+        logger.warn("source is not there yet, try again in 60 secs")
         raise self.retry(exc=exc, countdown=60)


 @shared_task(ignore_result=True)
 def jbi_get_artifact(jbi_id, jobname, buildnumber, artifact_info):
-    log = logger.bind(
-        jbi_id=jbi_id,
-        jobname=jobname,
-    )
     path = jenkins_get_artifact(jobname, buildnumber, artifact_info)
     if path.name == settings.HOTFIX_ARTIFACT:
         if settings.TRACKER_PROVIDER == Tracker.NONE:
-            log.info("no tracker defined, skip hotfix management")
+            logger.info("no tracker defined, skip hotfix management")
             return
         jbi_parse_hotfix.delay(jbi_id, str(path))


 @shared_task(ignore_result=True)
 def get_jbi_files(jbi_id, jobname, buildnumber):
-    log = logger.bind(
-        jbi_id=jbi_id,
-        jobname=jobname,
-        buildnumber=buildnumber,
-    )
     jenkins_get_console(jobname, buildnumber)
     path_envVars = jenkins_get_env(jobname, buildnumber)
     path_build = jenkins_get_build(jobname, buildnumber)
     if is_download_artifacts(jobname):
         with open(path_build) as data_file:
             data = json.load(data_file)
-            log.debug("job_info", data=data)
+            logger.debug("job_info", data=data)
         for artifact in data["artifacts"]:
             jbi_get_artifact.delay(jbi_id, jobname, buildnumber, artifact)
     else:
-        log.debug("skip artifacts download")
+        logger.debug("skip artifacts download")
     if jobname in settings.RELEASE_CHANGED_JOBS:
         process_result.delay(jbi_id, str(path_envVars))
@@ -92,4 +83,4 @@ def get_jbi_files(jbi_id, jobname, buildnumber):
 def jbi_purge(release, weeks):
     JenkinsBuildInfo = apps.get_model("repoapi", "JenkinsBuildInfo")
     JenkinsBuildInfo.objects.purge_release(release, timedelta(weeks=weeks))
-    logger.info("purged release %s jbi older than %s weeks" % (release, weeks))
+    logger.info(f"purged release {release} jbi older than {weeks} weeks")

@@ -31,15 +31,11 @@ JBI_ENVVARS_URL = "{}/job/{}/{}/injectedEnvVars/api/json"
 def executeAndReturnOutput(command, env=None):
-    log = logger.bind(
-        command=command,
-        env=env,
-    )
     proc = subprocess.Popen(
         command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
     )
     stdoutdata, stderrdata = proc.communicate()
-    log.debug("command done", stdout=stdoutdata, stderr=stderrdata)
+    logger.debug("command done", stdout=stdoutdata, stderr=stderrdata)
     return proc.returncode, stdoutdata, stderrdata
@@ -53,17 +49,17 @@ def get_jenkins_response(url):
 def dlfile(url, path: Path):
-    log = logger.bind(
+    structlog.contextvars.bind_contextvars(
         url=url,
         path=str(path),
     )
     if settings.DEBUG:
-        log.info("_NOT_ calling due to DEBUG is set")
+        logger.info("_NOT_ calling due to DEBUG is set")
     else:
         auth = HTTPBasicAuth(
             settings.JENKINS_HTTP_USER, settings.JENKINS_HTTP_PASSWD
         )
-        log.debug("get request")
+        logger.debug("get request")
         req = requests.get(url, auth=auth)
         with open(path, "wb") as local_file:
             for chunk in req.iter_content(chunk_size=128):
@@ -71,18 +67,15 @@ def dlfile(url, path: Path):
 def open_jenkins_url(url):
-    log = logger.bind(
-        url=url,
-    )
-    log.debug("Trying to retrieve")
+    logger.debug("Trying to retrieve")
     try:
         res = get_jenkins_response(url)
-        log.debug("OK", status_code=res.status_code)
+        logger.debug("OK", status_code=res.status_code)
         return True
     except requests.HTTPError as e:
-        log.error("Error %s", e, status_code=res.status_code)
+        logger.error("Error %s", e, status_code=res.status_code)
     except Exception as e:
-        log.error("Fatal error retrieving:", error=str(e))
+        logger.error("Fatal error retrieving:", error=str(e))
     return False
@@ -93,12 +86,11 @@ def jenkins_remove_ppa(repo):
         "token=%s&repository=%s"
         % (settings.JENKINS_URL, settings.JENKINS_TOKEN, repo)
     )
-    log = logger.bind(
-        repo=repo,
+    structlog.contextvars.bind_contextvars(
         url=url,
     )
     if settings.DEBUG:
-        log.debug("_NOT_ calling due to DEBUG is set")
+        logger.debug("_NOT_ calling due to DEBUG is set")
     else:
         open_jenkins_url(url)
@@ -109,15 +101,13 @@ def jenkins_remove_project_ppa(repo, source):
         "token=%s&repository=%s&source=%s"
         % (settings.JENKINS_URL, settings.JENKINS_TOKEN, repo, source)
     )
-    log = logger.bind(
-        repo=repo,
-        source=source,
+    structlog.contextvars.bind_contextvars(
         url=url,
     )
     if source is None:
         raise FileNotFoundError()
     if settings.DEBUG:
-        log.debug("_NOT_ calling due to DEBUG is set")
+        logger.debug("_NOT_ calling due to DEBUG is set")
     else:
         open_jenkins_url(url)
@@ -125,12 +115,12 @@ def jenkins_remove_project_ppa(repo, source):
 def _jenkins_get(url, base_path: Path, filename) -> Path:
     base_path.mkdir(parents=True, exist_ok=True)
     path = base_path.joinpath(filename)
-    log = logger.bind(
+    structlog.contextvars.bind_contextvars(
         base_path=str(base_path),
         filename=filename,
         url=url,
     )
-    log.debug("download file from jenkins")
+    logger.debug("download file from jenkins")
     dlfile(url, path)
     return path
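
A side effect of moving helper-level binds like the one in dlfile to bind_contextvars (again an observation, not part of the commit): the bound url/path keys now outlive the helper call and stay attached to the caller's context. Where that matters, the tokens returned by bind_contextvars allow restoring the previous state; a hedged sketch with a hypothetical wrapper:

import structlog

logger = structlog.get_logger(__name__)

def dlfile_scoped(url, path):  # hypothetical variant of dlfile
    tokens = structlog.contextvars.bind_contextvars(url=url, path=str(path))
    try:
        logger.debug("get request")
        # ... download as in dlfile ...
    finally:
        # undo exactly this bind, restoring whatever was bound before
        structlog.contextvars.reset_contextvars(**tokens)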

@@ -62,11 +62,6 @@ def workfront_set_release_target(_id, release):
 def mantis_query(method, url, payload=None) -> requests.Response:
-    logger.bind(
-        method=method,
-        url=url,
-        payload=payload,
-    )
     response = requests.request(
         f"{method}", url, headers=MANTIS_HEADERS, data=payload
     )
