MT#33006 repoapi: upgrade structlog config (3.0+)

https://django-structlog.readthedocs.io/en/latest/upgrade_guide.html
Change-Id: I76b4f26aca2a763fb106bfe6c7c85ea3b6c06d16
pull/9/head
Victor Seva 3 years ago
parent dc4d537f3d
commit 951a274c51

@ -50,7 +50,6 @@ class BuildReleaseManager(models.Manager):
)
def create_build_release(self, uuid, release, fake=False):
log = logger.bind(uuid=str(uuid), release=release, fake=fake)
config = ReleaseConfig(release)
qs = self.get_queryset()
br = qs.filter(
@ -59,18 +58,17 @@ class BuildReleaseManager(models.Manager):
release_ok = config.release
if br.exists():
if regex_mrXXX.match(config.branch):
msg = "release[mrX.Y.Z]:{} has already a build"
raise BuildReleaseUnique(msg.format(release))
msg = f"release[mrX.Y.Z]:{release} has already a build"
raise BuildReleaseUnique(msg)
elif regex_mrXX.match(config.branch):
release_ok = "{}-update".format(config.release)
release_ok = f"{config.release}-update"
msg = (
"release[mrX.Y]:{} has already a build, "
"set {} as release"
f"release[mrX.Y]:{config.branch} has already a build, "
f"set {release_ok} as release"
)
log.info(msg.format(config.branch, release_ok))
logger.info(msg)
if not br.last().done:
msg = f"release:{release} is already building"
log.info(msg)
logger.info(f"release:{release} is already building")
raise PreviousBuildNotDone(msg)
projects = ",".join(config.projects)
if fake:
@ -316,7 +314,7 @@ class BuildRelease(models.Model):
return "branch/{}".format(self.branch)
def _next(self, exclude=[]):
log = logger.bind(release=self)
structlog.contextvars.bind_contextvars(release=f"{self}")
if self.built_projects is None:
return self.build_deps[0][0]
if self.done:
@ -333,7 +331,7 @@ class BuildRelease(models.Model):
deps_missing.append(prj)
else:
if len(deps_missing) > 0:
log.info(
logger.info(
"release has build_deps missing",
deps_missing=deps_missing,
)
@ -347,9 +345,9 @@ class BuildRelease(models.Model):
@property
def next(self):
failed_projects = self.failed_projects_list
log = logger.bind(release=self)
structlog.contextvars.bind_contextvars(release=f"{self}")
if any(job in failed_projects for job in settings.BUILD_RELEASE_JOBS):
log.info(
logger.info(
"release has failed release_jobs, stop sending jobs",
failed_projects=failed_projects,
)

@ -35,16 +35,16 @@ def br_manage(sender, **kwargs):
instance = kwargs["instance"]
if instance.release.endswith("-update"):
build_resume.delay(instance.pk)
logger.debug("BuildRelease:%s triggered", instance)
logger.debug(f"BuildRelease:{instance} triggered")
elif timezone.now() > instance.start_date + timedelta(minutes=15):
logger.debug(
"BuildRelease:%s not triggered, is from the past:%s",
instance,
instance.start_date,
msg = (
f"BuildRelease:{instance} not triggered,"
f" is from the past:{instance.start_date}"
)
logger.debug(msg)
else:
build_release.delay(instance.pk)
logger.debug("BuildRelease:%s triggered", instance)
logger.debug(f"BuildRelease:{instance} triggered")
@receiver(
@ -62,24 +62,24 @@ def jbi_manage(sender, **kwargs):
if jbi.param_release_uuid is None:
return
release = jbi.param_release
log = logger.bind(
structlog.contextvars.bind_contextvars(
release_uuid=jbi.param_release_uuid, release=jbi.param_release
)
if jbi.jobname in settings.BUILD_RELEASE_JOBS:
if not release.startswith("release-"):
release = "release-{}".format(jbi.param_release)
if jbi.param_release_uuid in [None, "none", "", "$release_uuid"]:
log.debug("no ReleaseBuild link, skip")
logger.debug("no ReleaseBuild link, skip")
return
try:
br = BuildRelease.objects.get(
uuid=jbi.param_release_uuid,
)
except BuildRelease.DoesNotExist:
log.error("BuildRelease not found")
logger.error("BuildRelease not found")
return
if not br.append_built(jbi):
log.debug("BuildRelease:%s jbi:%s skip", br, jbi)
logger.debug(f"BuildRelease:{br} jbi:{jbi} skip")
return
br.remove_triggered(jbi)
build_resume.delay(br.id)

@ -27,12 +27,11 @@ logger = structlog.get_logger(__name__)
@shared_task(bind=True)
def build_release(self, pk):
BuildRelease = apps.get_model("build", "BuildRelease")
log = logger.bind(pk=pk)
br = BuildRelease.objects
try:
instance = br.get(id=pk)
except BuildRelease.DoesNotExist as exc:
log.warn("BuildRelease not found")
logger.warn("BuildRelease not found")
raise self.retry(countdown=60 * 5, exc=exc)
if instance.release == "trunk":
release = "release-trunk-{}".format(instance.distribution)
@ -41,7 +40,7 @@ def build_release(self, pk):
url = trigger_copy_deps(
release=release, internal=True, release_uuid=instance.uuid
)
log.info(
logger.info(
"BuildRelease copy_deps triggered", instance=str(instance), url=url
)
@ -49,11 +48,10 @@ def build_release(self, pk):
@shared_task(ignore_result=True)
def build_project(pk, project):
BuildRelease = apps.get_model("build", "BuildRelease")
log = logger.bind(project=project, pk=pk)
try:
br = BuildRelease.objects.get(id=pk)
except BuildRelease.DoesNotExist:
log.error("can't trigger project on unknown release")
logger.error("can't trigger project on unknown release")
return
url = trigger_build(
"{}-get-code".format(project),
@ -63,17 +61,16 @@ def build_project(pk, project):
trigger_distribution=br.distribution,
)
br.pool_size += 1
log.info("project triggered", url=url, pool_size=br.pool_size)
logger.info("project triggered", url=url, pool_size=br.pool_size)
@shared_task(ignore_result=True)
def build_resume(pk):
BuildRelease = apps.get_model("build", "BuildRelease")
log = logger.bind(pk=pk)
try:
br = BuildRelease.objects.get(id=pk)
except BuildRelease.DoesNotExist:
log.error("can't resume on unknown release")
logger.error("can't resume on unknown release")
return
params = {
"release_uuid": br.uuid,
@ -82,25 +79,27 @@ def build_resume(pk):
"trigger_distribution": br.distribution,
}
size = settings.BUILD_POOL - br.pool_size
log.bind(size=size, pool_size=br.pool_size, br=str(br))
structlog.contextvars.bind_contextvars(
size=size, pool_size=br.pool_size, br=str(br)
)
if size <= 0:
log.info("No more room for new builds, wait for next slot")
logger.info("No more room for new builds, wait for next slot")
for step in range(size):
prj = br.next
if prj:
params["project"] = "{}-get-code".format(prj)
log.debug("trigger project", project=params["project"])
logger.debug("trigger project", project=params["project"])
trigger_build(**params)
br.append_triggered(prj)
else:
log.debug("BuildRelease has no next")
logger.debug("BuildRelease has no next")
if not br.done:
log.debug("not done yet")
logger.debug("not done yet")
continue
if br.release == "release-trunk-weekly":
url = trigger_build_matrix(br)
if url is not None:
log.info(
logger.info(
"build_matrix triggered", instance=str(br), url=url
)
break

@ -96,7 +96,7 @@ def trigger_build_matrix(br):
logger.info("{} already triggered, skip".format(params["job"]))
return
if settings.DEBUG:
logger.info("Debug mode, would trigger: %s", url)
logger.info(f"Debug mode, would trigger: {url}")
else:
open_jenkins_url(url)
return "{base}/job/{job}/".format(**params)
@ -121,7 +121,7 @@ def trigger_copy_deps(release, internal, release_uuid, uuid=None):
}
url = copy_deps_url.format(**params)
if settings.DEBUG:
logger.info("Debug mode, would trigger: %s", url)
logger.info(f"Debug mode, would trigger: {url}")
else:
open_jenkins_url(url)
return "{base}/job/{job}/".format(**params)
@ -165,7 +165,7 @@ def trigger_build(
url = project_url.format(**params)
if settings.DEBUG:
logger.info("Debug mode, would trigger: %s", url)
logger.info(f"Debug mode, would trigger: {url}")
else:
open_jenkins_url(url)
return "{base}/job/{job}/".format(**params)

@ -22,7 +22,7 @@ logger = structlog.get_logger(__name__)
def process_hotfix(jbi_info, projectname, path):
model = NoteInfo.get_model()
logger.info("hotfix_released[%s] %s", jbi_info, path)
logger.info(f"hotfix_released[{jbi_info}] {path}")
ids, changelog = parse_changelog(path, model)
for wid in ids:
model.create(wid, projectname, changelog.full_version)

@ -57,7 +57,7 @@ def docker_fetch_project(projectname):
project = Project.objects.get(name=projectname)
for imagename in project.filter_docker_images(images):
image = DockerImage.objects.create(name=imagename, project=project)
logger.debug("%s created" % image)
logger.debug(f"{image} created")
docker_fetch_info.delay(image.name)
@ -72,7 +72,7 @@ def docker_fetch_all():
project, _ = Project.objects.get_or_create(name=projectname)
for imagename in project.filter_docker_images(images):
image = DockerImage.objects.create(name=imagename, project=project)
logger.debug("%s created" % image)
logger.debug(f"{image} created")
docker_fetch_info.delay(image.name)

@ -61,7 +61,7 @@ def trigger_hotfix(project, branch, user, push="yes", empty=False):
url = hotfix_url.format(**params)
if settings.DEBUG:
logger.warn("Debug mode, would trigger: %s", url)
logger.warn(f"Debug mode, would trigger: {url}")
else:
open_jenkins_url(url)
res = [

@ -48,7 +48,7 @@ def trigger_docker_build(project, branch):
url = docker_url.format(**params)
if settings.DEBUG:
logger.debug("Debug mode, would trigger: %s", url)
logger.debug(f"Debug mode, would trigger: {url}")
else:
open_jenkins_url(url)
return "{base}/job/build-project-docker/".format(**params)
@ -56,13 +56,13 @@ def trigger_docker_build(project, branch):
def _get_info(url, headers=None):
if settings.DEBUG:
logger.debug("Debug mode, would trigger: %s", url)
logger.debug(f"Debug mode, would trigger: {url}")
else:
if headers:
logger.debug("trigger: %s, headers: '%s'", url, headers)
logger.debug(f"trigger: {url}, headers: '{headers}'")
response = requests.get(url, headers)
else:
logger.debug("trigger: %s", url)
logger.debug(f"trigger: {url}")
response = requests.get(url)
logger.debug("response: %s" % response)
response.raise_for_status()
@ -84,11 +84,11 @@ def get_docker_manifests_info(url):
def delete_docker_info(url):
if settings.DEBUG:
logger.debug("Debug mode, would trigger: %s", url)
logger.debug(f"Debug mode, would trigger: {url}")
else:
logger.debug("trigger: %s", url)
logger.debug(f"trigger:{url}")
response = requests.delete(url)
logger.debug("response: %s" % response)
logger.debug(f"response: {response}")
response.raise_for_status()
return
@ -101,7 +101,7 @@ def get_docker_repositories():
url = settings.DOCKER_REGISTRY_URL.format("_catalog")
try:
info = get_docker_info(url)
logger.debug("response: %s" % info)
logger.debug(f"response: {info}")
result = json.loads(info)
return result["repositories"]
except Exception as e:
@ -116,14 +116,14 @@ def get_docker_tags(image):
except KeyError:
return []
else:
url = settings.DOCKER_REGISTRY_URL.format("%s/tags/list" % image)
url = settings.DOCKER_REGISTRY_URL.format(f"{image}/tags/list")
try:
info = get_docker_info(url)
logger.debug("response: %s" % info)
result = json.loads(info)
return result["tags"]
except Exception as e:
logger.error("image: %s %s" % (image, e))
logger.error(f"image: {image} {e}")
return []
@ -132,34 +132,34 @@ def get_docker_manifests(image, tag):
return ("{}", uuid.uuid4())
else:
dru = settings.DOCKER_REGISTRY_URL
url = dru.format("%s/manifests/%s" % (image, tag))
url = dru.format(f"{image}/manifests/{tag}")
try:
info, digest = get_docker_manifests_info(url)
logger.debug("response: %s" % info)
logger.debug(f"response: {info}")
result = json.loads(info)
return (result, digest)
except Exception as e:
logger.error("image: %s tag:%s %s" % (image, tag, e))
logger.error(f"image: {image} tag:{tag} {e}")
return (None, None)
def delete_tag(image, reference, tag_name):
try:
dru = settings.DOCKER_REGISTRY_URL
url = dru.format("%s/manifests/%s" % (image, reference))
logger.debug("docker delete_tag(%s)" % (url))
url = dru.format(f"{image}/manifests/{reference}")
logger.debug(f"docker delete_tag({url})")
delete_docker_info(url)
except Exception:
# it does not work for some, retrieve Docker-Content-Digest from
# manifests and delete using that as reference
dru = settings.DOCKER_REGISTRY_URL
url = dru.format("%s/manifests/%s" % (image, tag_name))
logger.debug("docker delete_tag() get_docker_manif_info(): %s" % (url))
url = dru.format(f"{image}/manifests/{tag_name}")
logger.debug(f"docker delete_tag() get_docker_manif_info(): {url}")
response = get_docker_manifests_info(url)
logger.debug(" - response text: %s" % (response[0]))
logger.debug(" - response Docker-Content-Digest: %s" % (response[1]))
dru = settings.DOCKER_REGISTRY_URL
url = dru.format("%s/manifests/%s" % (image, response[1]))
logger.info("docker delete_tag() will delete: %s" % (url))
logger.info(f"docker delete_tag() will delete: {url}")
delete_docker_info(url)

@ -84,12 +84,12 @@ def build_release(request, release):
@permission_required("build.can_trigger_hotfix", raise_exception=True)
def hotfix_build(request, branch, project):
if project not in settings.RELEASE_DASHBOARD_PROJECTS:
error = "repo:%s not valid" % project
error = f"repo:{project} not valid"
logger.error(error)
return HttpResponseNotFound(error)
if not regex_hotfix.match(branch):
error = "branch:%s not valid. Not mrX.X.X format" % branch
error = f"branch:{branch} not valid. Not mrX.X.X format"
logger.error(error)
return HttpResponseNotFound(error)
proj = Project.objects.get(name=project)
@ -103,7 +103,7 @@ def hotfix_build(request, branch, project):
push = json_data.get("push", "no")
empty = json_data.get("empty", False)
if push == "no":
logger.warn("dry-run for %s:%s", project, branch)
logger.warn(f"dry-run for {project}:{branch}")
urls = build.trigger_hotfix(project, branch, request.user, push, empty)
return JsonResponse({"urls": urls})
@ -132,7 +132,7 @@ def hotfix_release_build(request, release, project):
error = f"branch:{release_config.branch} not valid. Not mrX.X.X format"
logger.error(error)
return HttpResponseNotFound(error)
logger.debug(body=request.body)
structlog.contextvars.bind_contextvars(body=request.body)
json_data = json.loads(request.body.decode("utf-8"))
push = json_data.get("push", "no")
empty = json_data.get("empty", False)

@ -44,19 +44,19 @@ logger = structlog.get_logger(__name__)
def _get_docker_tags(project, tag=None):
repos = docker.get_docker_repositories()
r = re.compile(".*%s.*" % project)
r = re.compile(f".*{project}.*")
project_repos = filter(r.match, repos)
logger.debug("%s: %s" % (project, project_repos))
logger.debug(f"{project}: {project_repos}")
docker_tags = []
for image in project_repos:
res = {"name": image}
tags = docker.get_docker_tags(image)
if tag:
logger.debug("non filtered tags: %s" % tags)
logger.debug(f"non filtered tags: {tags}")
tags = filter(re.compile(tag).match, tags)
res["tags"] = tags
docker_tags.append(res)
logger.debug("docker_tags: %s" % docker_tags)
logger.debug(f"docker_tags: {docker_tags}")
return docker_tags
@ -73,8 +73,8 @@ def _build_docker_logic(form, projects):
url = docker.trigger_docker_build(pro, result[pro])
context["projects"].append({"name": pro, "url": url})
except KeyError:
msg = "Houston, we have a problem with trigger for %s"
logger.error(msg, pro)
msg = f"Houston, we have a problem with trigger for {pro}"
logger.error(msg)
context["projects"].append({"name": pro, "url": None})
return context

@ -72,6 +72,7 @@ def receiver_setup_logging(loglevel, logfile, format, colorize, **kwargs):
structlog.configure(
processors=[
structlog.contextvars.merge_contextvars,
structlog.stdlib.filter_by_level,
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
@ -82,9 +83,7 @@ def receiver_setup_logging(loglevel, logfile, format, colorize, **kwargs):
structlog.processors.ExceptionPrettyPrinter(),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
],
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)

@ -162,7 +162,9 @@ LOGGING = {
structlog.configure(
processors=[
structlog.contextvars.merge_contextvars,
structlog.stdlib.filter_by_level,
structlog.processors.TimeStamper(fmt="iso"),
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
@ -172,9 +174,7 @@ structlog.configure(
structlog.processors.ExceptionPrettyPrinter(),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
],
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)

@ -40,11 +40,8 @@ def jbi_manage(sender, **kwargs):
def gerrit_repo_add(instance):
log = logger.bind(
instance=str(instance),
)
if instance.param_ppa == "$ppa":
log.warn("ppa unset, skip removal")
logger.warn("ppa unset, skip removal")
return
GerritRepoInfo = apps.get_model("repoapi", "GerritRepoInfo")
gri = GerritRepoInfo.objects
@ -54,19 +51,16 @@ def gerrit_repo_add(instance):
defaults={"projectname": instance.projectname},
)
if created:
log.debug("ppa created", ppa=str(ppa))
logger.debug("ppa created", ppa=str(ppa))
elif ppa.projectname == "unknown":
ppa.projectname = instance.projectname
ppa.save()
log.info("ppa projectname updated")
logger.info("ppa projectname updated")
def gerrit_repo_del(instance):
log = logger.bind(
instance=str(instance),
)
if instance.param_ppa == "$ppa":
log.warn("ppa unset, skip removal")
logger.warn("ppa unset, skip removal")
return
GerritRepoInfo = apps.get_model("repoapi", "GerritRepoInfo")
gri = GerritRepoInfo.objects
@ -75,7 +69,7 @@ def gerrit_repo_del(instance):
param_ppa=instance.param_ppa, gerrit_change=instance.gerrit_change
)
ppa.delete()
log.debug("removed ppa", ppa=str(ppa))
logger.debug("removed ppa", ppa=str(ppa))
except GerritRepoInfo.DoesNotExist:
pass
qs = gri.filter(param_ppa=instance.param_ppa)
@ -84,10 +78,10 @@ def gerrit_repo_del(instance):
if ppa_count == 0:
utils.jenkins_remove_ppa(instance.param_ppa)
elif project_ppa_count == 0:
log.debug("remove source+packages from ppa")
logger.debug("remove source+packages from ppa")
jenkins_remove_project.delay(instance.id)
else:
log.debug(
logger.debug(
"nothing to do here",
ppa_count=ppa_count,
project_ppa_count=project_ppa_count,
@ -102,12 +96,12 @@ def gerrit_repo_del(instance):
def gerrit_repo_manage(sender, **kwargs):
if kwargs["created"]:
instance = kwargs["instance"]
log = logger.bind(
structlog.contextvars.bind_contextvars(
instance=str(instance),
ppa=instance.param_ppa,
)
if instance.param_ppa == "$ppa":
log.warn("ppa unset, skip")
logger.warn("ppa unset, skip")
return
if (
instance.jobname.endswith("-repos")
@ -123,14 +117,14 @@ def gerrit_repo_manage(sender, **kwargs):
and instance.result == "SUCCESS"
and instance.gerrit_eventtype == "change-abandoned"
):
log.debug("we need to count this")
logger.debug("we need to count this")
gerrit_repo_del(instance)
def tracker_release_target(instance, note: NoteInfo):
if not is_ngcp_project(instance.projectname):
logger.info(
"%s not a NGCP project, skip release_target", instance.projectname
f"{instance.projectname} not a NGCP project, skip release_target"
)
return
branch = instance.param_branch

@ -37,7 +37,7 @@ logger = structlog.get_logger(__name__)
def jenkins_remove_project(self, jbi_id):
JenkinsBuildInfo = apps.get_model("repoapi", "JenkinsBuildInfo")
jbi = JenkinsBuildInfo.objects.get(id=jbi_id)
log = logger.bind(
structlog.contextvars.bind_contextvars(
jbi=str(jbi),
)
if (
@ -48,42 +48,33 @@ def jenkins_remove_project(self, jbi_id):
try:
jenkins_remove_project_ppa(jbi.param_ppa, jbi.source)
except FileNotFoundError as exc:
log.warn("source is not there yet, try again in 60 secs")
logger.warn("source is not there yet, try again in 60 secs")
raise self.retry(exc=exc, countdown=60)
@shared_task(ignore_result=True)
def jbi_get_artifact(jbi_id, jobname, buildnumber, artifact_info):
log = logger.bind(
jbi_id=jbi_id,
jobname=jobname,
)
path = jenkins_get_artifact(jobname, buildnumber, artifact_info)
if path.name == settings.HOTFIX_ARTIFACT:
if settings.TRACKER_PROVIDER == Tracker.NONE:
log.info("no tracker defined, skip hotfix management")
logger.info("no tracker defined, skip hotfix management")
return
jbi_parse_hotfix.delay(jbi_id, str(path))
@shared_task(ignore_result=True)
def get_jbi_files(jbi_id, jobname, buildnumber):
log = logger.bind(
jbi_id=jbi_id,
jobname=jobname,
buildnumber=buildnumber,
)
jenkins_get_console(jobname, buildnumber)
path_envVars = jenkins_get_env(jobname, buildnumber)
path_build = jenkins_get_build(jobname, buildnumber)
if is_download_artifacts(jobname):
with open(path_build) as data_file:
data = json.load(data_file)
log.debug("job_info", data=data)
logger.debug("job_info", data=data)
for artifact in data["artifacts"]:
jbi_get_artifact.delay(jbi_id, jobname, buildnumber, artifact)
else:
log.debug("skip artifacts download")
logger.debug("skip artifacts download")
if jobname in settings.RELEASE_CHANGED_JOBS:
process_result.delay(jbi_id, str(path_envVars))
@ -92,4 +83,4 @@ def get_jbi_files(jbi_id, jobname, buildnumber):
def jbi_purge(release, weeks):
JenkinsBuildInfo = apps.get_model("repoapi", "JenkinsBuildInfo")
JenkinsBuildInfo.objects.purge_release(release, timedelta(weeks=weeks))
logger.info("purged release %s jbi older than %s weeks" % (release, weeks))
logger.info(f"purged release {release} jbi older than {weeks} weeks")

@ -31,15 +31,11 @@ JBI_ENVVARS_URL = "{}/job/{}/{}/injectedEnvVars/api/json"
def executeAndReturnOutput(command, env=None):
log = logger.bind(
command=command,
env=env,
)
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
stdoutdata, stderrdata = proc.communicate()
log.debug("command done", stdout=stdoutdata, stderr=stderrdata)
logger.debug("command done", stdout=stdoutdata, stderr=stderrdata)
return proc.returncode, stdoutdata, stderrdata
@ -53,17 +49,17 @@ def get_jenkins_response(url):
def dlfile(url, path: Path):
log = logger.bind(
structlog.contextvars.bind_contextvars(
url=url,
path=str(path),
)
if settings.DEBUG:
log.info("_NOT_ calling due to DEBUG is set")
logger.info("_NOT_ calling due to DEBUG is set")
else:
auth = HTTPBasicAuth(
settings.JENKINS_HTTP_USER, settings.JENKINS_HTTP_PASSWD
)
log.debug("get request")
logger.debug("get request")
req = requests.get(url, auth=auth)
with open(path, "wb") as local_file:
for chunk in req.iter_content(chunk_size=128):
@ -71,18 +67,15 @@ def dlfile(url, path: Path):
def open_jenkins_url(url):
log = logger.bind(
url=url,
)
log.debug("Trying to retrieve")
logger.debug("Trying to retrieve")
try:
res = get_jenkins_response(url)
log.debug("OK", status_code=res.status_code)
logger.debug("OK", status_code=res.status_code)
return True
except requests.HTTPError as e:
log.error("Error %s", e, status_code=res.status_code)
logger.error("Error %s", e, status_code=res.status_code)
except Exception as e:
log.error("Fatal error retrieving:", error=str(e))
logger.error("Fatal error retrieving:", error=str(e))
return False
@ -93,12 +86,11 @@ def jenkins_remove_ppa(repo):
"token=%s&repository=%s"
% (settings.JENKINS_URL, settings.JENKINS_TOKEN, repo)
)
log = logger.bind(
repo=repo,
structlog.contextvars.bind_contextvars(
url=url,
)
if settings.DEBUG:
log.debug("_NOT_ calling due to DEBUG is set")
logger.debug("_NOT_ calling due to DEBUG is set")
else:
open_jenkins_url(url)
@ -109,15 +101,13 @@ def jenkins_remove_project_ppa(repo, source):
"token=%s&repository=%s&source=%s"
% (settings.JENKINS_URL, settings.JENKINS_TOKEN, repo, source)
)
log = logger.bind(
repo=repo,
source=source,
structlog.contextvars.bind_contextvars(
url=url,
)
if source is None:
raise FileNotFoundError()
if settings.DEBUG:
log.debug("_NOT_ calling due to DEBUG is set")
logger.debug("_NOT_ calling due to DEBUG is set")
else:
open_jenkins_url(url)
@ -125,12 +115,12 @@ def jenkins_remove_project_ppa(repo, source):
def _jenkins_get(url, base_path: Path, filename) -> Path:
base_path.mkdir(parents=True, exist_ok=True)
path = base_path.joinpath(filename)
log = logger.bind(
structlog.contextvars.bind_contextvars(
base_path=str(base_path),
filename=filename,
url=url,
)
log.debug("download file from jenkins")
logger.debug("download file from jenkins")
dlfile(url, path)
return path

@ -62,11 +62,6 @@ def workfront_set_release_target(_id, release):
def mantis_query(method, url, payload=None) -> requests.Response:
logger.bind(
method=method,
url=url,
payload=payload,
)
response = requests.request(
f"{method}", url, headers=MANTIS_HEADERS, data=payload
)

Loading…
Cancel
Save